From mboxrd@z Thu Jan 1 00:00:00 1970 From: Jes Sorensen Date: Fri, 27 Mar 2009 16:19:30 +0000 Subject: Re: windows patch seems to have broken allocation on demand patches Message-Id: <49CCFC92.6050409@sgi.com> MIME-Version: 1 Content-Type: multipart/mixed; boundary="------------000201030103050004020007" List-Id: References: <49CCF816.10106@sgi.com> In-Reply-To: <49CCF816.10106@sgi.com> To: kvm-ia64@vger.kernel.org This is a multi-part message in MIME format. --------------000201030103050004020007 Content-Type: text/plain; charset=ISO-8859-1; format=flowed Content-Transfer-Encoding: 7bit Zhang, Xiantao wrote: > Which patches ? > Xiantao This is the one I keep applying - but modified to be against the latest kernel, which is broken. You came up with this due to the long startup time for KVM when booting large guests, like 64GB+ etc. Cheers, Jes > -----Original Message----- > From: Jes Sorensen [mailto:jes@sgi.com] > Sent: Saturday, March 28, 2009 12:00 AM > To: Zhang, Xiantao > Cc: kvm-ia64@vger.kernel.org > Subject: windows patch seems to have broken allocation on demand patches > > Hi Xiantao, > > The recent changes adding support for windows to KVM/ia64, seems to have > broken the allocation on demand patches, needed to boot with large > memory support. > > Is there any chance you have an update version of these patches? 
> > Cheers, > Jes --------------000201030103050004020007 Content-Type: text/x-patch; name="4000-kvm-ia64-allocate_on_demand.patch" Content-Transfer-Encoding: 7bit Content-Disposition: inline; filename="4000-kvm-ia64-allocate_on_demand.patch" --- arch/ia64/include/asm/kvm_host.h | 8 ++++++ arch/ia64/kvm/kvm-ia64.c | 49 ++++++++++++++++++++++++++++++++------- arch/ia64/kvm/misc.h | 7 +++++ arch/ia64/kvm/vtlb.c | 29 ++++++++++++++++++++++- 4 files changed, 84 insertions(+), 9 deletions(-) Index: linux-2.6.git/arch/ia64/include/asm/kvm_host.h =================================================================== --- linux-2.6.git.orig/arch/ia64/include/asm/kvm_host.h +++ linux-2.6.git/arch/ia64/include/asm/kvm_host.h @@ -39,6 +39,7 @@ #define EXIT_REASON_EXTERNAL_INTERRUPT 6 #define EXIT_REASON_IPI 7 #define EXIT_REASON_PTC_G 8 +#define EXIT_REASON_ALLOC_MEM 9 #define EXIT_REASON_DEBUG 20 /*Define vmm address space and vm data space.*/ @@ -313,6 +314,12 @@ struct kvm_vcpu *vcpu; }; +/* Alloc real memory exit */ +struct kvm_alloc_mem { + unsigned long gpfn; + unsigned long pmt_val; +}; + /*Exit control data */ struct exit_ctl_data{ uint32_t exit_reason; @@ -324,6 +331,7 @@ struct kvm_switch_rr6 rr_data; struct kvm_ipi_data ipi_data; struct kvm_ptc_g ptc_g_data; + struct kvm_alloc_mem alloc_mem; } u; }; Index: linux-2.6.git/arch/ia64/kvm/kvm-ia64.c =================================================================== --- linux-2.6.git.orig/arch/ia64/kvm/kvm-ia64.c +++ linux-2.6.git/arch/ia64/kvm/kvm-ia64.c @@ -212,6 +212,8 @@ { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 1; + printk(KERN_ERR"KVM: VM error occurs!"); + return 0; } @@ -481,6 +483,40 @@ return 1; } +static int handle_mem_alloc(struct kvm_vcpu *vcpu, + struct kvm_run *kvm_run) +{ + unsigned long pmt_val, gpfn, pfn, gpfn_off; + struct kvm_memory_slot *memslot; + struct exit_ctl_data *p = kvm_get_exit_data(vcpu); + + gpfn = p->u.alloc_mem.gpfn; + + spin_lock(&vcpu->kvm->mmu_lock); + 
pmt_val = kvm_get_pmt_entry(vcpu->kvm, gpfn); + if (!pmt_val) { + + pfn = gfn_to_pfn(vcpu->kvm, gpfn); + if (!pfn_valid(pfn)) + goto out; + + kvm_set_pmt_entry(vcpu->kvm, gpfn, pfn << PAGE_SHIFT, + _PAGE_AR_RWX | _PAGE_MA_WB); + + memslot = gfn_to_memslot(vcpu->kvm, gpfn); + if (!memslot) + goto out; + gpfn_off = gpfn - memslot->base_gfn; + memslot->rmap[gpfn_off] = (unsigned long)pfn_to_page(pfn); + pmt_val = kvm_get_pmt_entry(vcpu->kvm, gpfn); + } +out: + spin_unlock(&vcpu->kvm->mmu_lock); + p->u.alloc_mem.pmt_val = pmt_val; + + return 1; +} + static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) = { [EXIT_REASON_VM_PANIC] = handle_vm_error, @@ -492,6 +528,7 @@ [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_IPI] = handle_ipi, [EXIT_REASON_PTC_G] = handle_global_purge, + [EXIT_REASON_ALLOC_MEM] = handle_mem_alloc, [EXIT_REASON_DEBUG] = handle_vcpu_debug, }; @@ -1554,18 +1591,14 @@ for (i = 0; i < npages; i++) { pfn = gfn_to_pfn(kvm, base_gfn + i); - if (!kvm_is_mmio_pfn(pfn)) { - kvm_set_pmt_entry(kvm, base_gfn + i, - pfn << PAGE_SHIFT, - _PAGE_AR_RWX | _PAGE_MA_WB); - memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); - } else { + if (kvm_is_mmio_pfn(pfn)) { kvm_set_pmt_entry(kvm, base_gfn + i, GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), _PAGE_MA_UC); - memslot->rmap[i] = 0; - } + } + memslot->rmap[i] = 0; } + spin_unlock(&kvm->mmu_lock); return 0; } Index: linux-2.6.git/arch/ia64/kvm/misc.h =================================================================== --- linux-2.6.git.orig/arch/ia64/kvm/misc.h +++ linux-2.6.git/arch/ia64/kvm/misc.h @@ -41,6 +41,13 @@ pmt_base[gfn] = pte; } +static inline uint64_t kvm_get_pmt_entry(struct kvm *kvm, gfn_t gfn) +{ + uint64_t *pmt_base = kvm_host_get_pmt(kvm); + + return pmt_base[gfn]; +} + /*Function for translating host address to guest address*/ static inline void *to_guest(struct kvm *kvm, void *addr) Index: linux-2.6.git/arch/ia64/kvm/vtlb.c 
=================================================================== --- linux-2.6.git.orig/arch/ia64/kvm/vtlb.c +++ linux-2.6.git/arch/ia64/kvm/vtlb.c @@ -558,14 +558,41 @@ } } +static unsigned long alloc_real_maddr(unsigned long gpfn) +{ + struct exit_ctl_data *p = &current_vcpu->arch.exit_data; + unsigned long psr; + + local_irq_save(psr); + + p->exit_reason = EXIT_REASON_ALLOC_MEM; + p->u.alloc_mem.gpfn = gpfn; + p->u.alloc_mem.pmt_val = 0; + vmm_transition(current_vcpu); + + local_irq_restore(psr); + + return p->u.alloc_mem.pmt_val; +} + u64 kvm_get_mpt_entry(u64 gpfn) { u64 *base = (u64 *) KVM_P2M_BASE; + u64 pmt_val; if (gpfn >= (KVM_P2M_SIZE >> 3)) panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn); - return *(base + gpfn); + pmt_val = *(base + gpfn); + + if (!pmt_val) { + pmt_val = alloc_real_maddr(gpfn); + if (!pmt_val) { + //printk(KERN_ERR"kvm: NO Enough memory!\n"); + panic_vm(current_vcpu, "not enough memory\n"); + } + } + return pmt_val; } u64 kvm_lookup_mpa(u64 gpfn) --------------000201030103050004020007--