From: Sheng Yang
Subject: [PATCH][v4] PV extension of HVM (hybrid) support in Xen
Date: Mon, 1 Mar 2010 17:43:41 +0800
Message-ID: <201003011743.41341.sheng@linux.intel.com>
To: Keir Fraser
Cc: Ian Pratt, xen-devel, Ian Campbell, Tim Deegan

Hi Keir,

Here is the latest PV-featured HVM (hybrid) patchset.

Changes from v3:
1. Minor polish of the patchset; replaced several open-coded checks with
   is_hvm_pv_evtchn_domain().
2. Further renaming.

Changes from v2:
1. Changed the name "hybrid" to "PV featured HVM", along with the flag and
   macro names.
2. Merged the hypercall page with HVM's.
3. Cleaned up the VIRQ delivery mechanism, fixing the locking issue.
4. Removed the reserved region in E820 (one issue remains: I can't get the
   location of the grant table elegantly, as described in the Linux part of
   the patches).

Please review, thanks!

--
regards
Yang, Sheng

[attachment: hybrid-xen.patch]

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -682,7 +682,16 @@
     if ( is_hvm_vcpu(v) )
     {
+        unsigned long eip, cs;
+
         hvm_set_info_guest(v);
+
+        eip = c(user_regs.eip);
+        if ( eip != 0 )
+        {
+            cs = eip >> 12 << 8;
+            hvm_vcpu_reset_state(v, cs, 0);
+            hvm_funcs.set_tsc_offset(v, 0);
+        }
         goto out;
     }

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2239,6 +2239,13 @@
     {
     case VCPUOP_register_runstate_memory_area:
     case VCPUOP_get_runstate_info:
+    /* For evtchn on HVM */
+    case VCPUOP_initialise:
+    case VCPUOP_up:
+    case VCPUOP_set_periodic_timer:
+    case VCPUOP_stop_periodic_timer:
+    case VCPUOP_set_singleshot_timer:
+    case VCPUOP_stop_singleshot_timer:
         rc = do_vcpu_op(cmd, vcpuid, arg);
         break;
     default:
@@ -2310,6 +2317,7 @@
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
     HYPERCALL(xen_version),
+    HYPERCALL(console_io),
     HYPERCALL(event_channel_op),
     HYPERCALL(sched_op),
     HYPERCALL(hvm_op)
@@ -3109,6 +3117,36 @@
         break;
     }

+    case HVMOP_enable_pv:
+    {
+        struct xen_hvm_pv_type a;
+        struct domain *d;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(a.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto param_fail5;
+
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail5;
+
+        if ( a.flags & HVM_PV_EVTCHN )
+        {
+            update_domain_wallclock_time(d);
+            hvm_funcs.set_tsc_offset(d->vcpu[0], 0);
+            d->hvm_pv_enabled |= XEN_HVM_PV_EVTCHN_ENABLED;
+            printk("HVM: PV featured evtchn enabled\n");
+        }
+
+    param_fail5:
+        rcu_unlock_domain(d);
+        break;
+    }
+
     default:
     {
         gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
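For illustration, this is roughly how the guest side turns the feature on (a
minimal sketch only, assuming the Linux-style HYPERVISOR_hvm_op() wrapper and
DOMID_SELF; the real guest code lives in the Linux part of the patches, and
the toolstack could equally make this call on the domain's behalf):

/* Sketch: enable the PV evtchn extension for the calling domain. */
static int enable_hvm_pv_evtchn(void)
{
    struct xen_hvm_pv_type a = {
        .domid = DOMID_SELF,     /* rcu_lock_target_domain_by_id() accepts
                                    DOMID_SELF as well as a real domid */
        .flags = HVM_PV_EVTCHN,
    };

    /* On success the hypervisor sets XEN_HVM_PV_EVTCHN_ENABLED. */
    return HYPERVISOR_hvm_op(HVMOP_enable_pv, &a);
}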
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -26,6 +26,32 @@
 #include <asm/hvm/domain.h>
 #include <asm/hvm/support.h>

+/* Must be called with hvm_domain->irq_lock held */
+static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
+{
+    struct vcpu *v = d->vcpu[0];
+    int virq_pin = VIRQ_EMUL_PIN(pic_irq);
+
+    if ( v->virq_to_evtchn[virq_pin] != 0 )
+    {
+        send_guest_global_virq(d, virq_pin);
+        return;
+    }
+    vioapic_irq_positive_edge(d, ioapic_gsi);
+    vpic_irq_positive_edge(d, pic_irq);
+}
+
+/* Must be called with hvm_domain->irq_lock held */
+static void deassert_irq(struct domain *d, unsigned isa_irq)
+{
+    struct vcpu *v = d->vcpu[0];
+    int virq_pin = VIRQ_EMUL_PIN(isa_irq);
+
+    if ( v->virq_to_evtchn[virq_pin] == 0 )
+        /* Not driven by a VIRQ */
+        vpic_irq_negative_edge(d, isa_irq);
+}
+
 static void __hvm_pci_intx_assert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
@@ -45,10 +71,7 @@
     isa_irq = hvm_irq->pci_link.route[link];
     if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
          (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
-    {
-        vioapic_irq_positive_edge(d, isa_irq);
-        vpic_irq_positive_edge(d, isa_irq);
-    }
+        assert_irq(d, isa_irq, isa_irq);
 }

 void hvm_pci_intx_assert(
@@ -77,7 +100,7 @@
     isa_irq = hvm_irq->pci_link.route[link];
     if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
          (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+        deassert_irq(d, isa_irq);
 }

 void hvm_pci_intx_deassert(
@@ -100,10 +123,7 @@
     if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (hvm_irq->gsi_assert_count[gsi]++ == 0) )
-    {
-        vioapic_irq_positive_edge(d, gsi);
-        vpic_irq_positive_edge(d, isa_irq);
-    }
+        assert_irq(d, gsi, isa_irq);

     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }

@@ -120,7 +140,7 @@
     if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (--hvm_irq->gsi_assert_count[gsi] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+        deassert_irq(d, isa_irq);

     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }

@@ -185,16 +205,16 @@
 void hvm_assert_evtchn_irq(struct vcpu *v)
 {
-    if ( v->vcpu_id != 0 )
-        return;
-
     if ( unlikely(in_irq() || !local_irq_is_enabled()) )
     {
         tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
         return;
     }

-    hvm_set_callback_irq_level(v);
+    if ( is_hvm_pv_evtchn_vcpu(v) )
+        vcpu_kick(v);
+    else
+        hvm_set_callback_irq_level(v);
 }

 void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
@@ -251,7 +271,7 @@
     via_type = (uint8_t)(via >> 56) + 1;

     if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
-         (via_type > HVMIRQ_callback_pci_intx) )
+         (via_type > HVMIRQ_callback_vector) )
         via_type = HVMIRQ_callback_none;

     spin_lock(&d->arch.hvm_domain.irq_lock);
@@ -297,6 +317,9 @@
         if ( hvm_irq->callback_via_asserted )
             __hvm_pci_intx_assert(d, pdev, pintx);
         break;
+    case HVMIRQ_callback_vector:
+        hvm_irq->callback_via.vector = (uint8_t)via;
+        break;
     default:
         break;
     }
@@ -312,6 +335,10 @@
     case HVMIRQ_callback_pci_intx:
         printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
         break;
+    case HVMIRQ_callback_vector:
+        printk("Set HVMIRQ_callback_vector to %u\n",
+               hvm_irq->callback_via.vector);
+        break;
     default:
         printk("None\n");
         break;
@@ -322,6 +349,10 @@
 {
     struct hvm_domain *plat = &v->domain->arch.hvm_domain;
     int vector;
+
+    if ( (plat->irq.callback_via_type == HVMIRQ_callback_vector) &&
+         vcpu_info(v, evtchn_upcall_pending) )
+        return hvm_intack_vector(plat->irq.callback_via.vector);

     if ( unlikely(v->nmi_pending) )
         return hvm_intack_nmi;
@@ -363,6 +394,8 @@
     case hvm_intsrc_lapic:
         if ( !vlapic_ack_pending_irq(v, intack.vector) )
             intack = hvm_intack_none;
+        break;
+    case hvm_intsrc_vector:
         break;
     default:
         intack = hvm_intack_none;
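As a usage illustration of the new VIRQ_EMUL_PIN range (a sketch, not part of
this patch; it assumes a HYPERVISOR_event_channel_op() wrapper and that these
VIRQs are delivered as global VIRQs, so they must be bound on VCPU0 -- which
matches assert_irq() above consulting only d->vcpu[0]->virq_to_evtchn[]):

/* Sketch: route emulated ISA IRQ 'pin' over an event channel. */
static int bind_emul_pin(unsigned int pin)
{
    struct evtchn_bind_virq bind = {
        .virq = VIRQ_EMUL_PIN(pin),
        .vcpu = 0,
    };

    if ( HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) != 0 )
        return -1;

    /* Upcalls on this port now replace vPIC/vIOAPIC assertions of the pin. */
    return bind.port;
}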
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -164,7 +164,8 @@
     {
         HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
         vmx_inject_extint(intack.vector);
-        pt_intr_post(v, intack);
+        if ( intack.source != hvm_intsrc_vector )
+            pt_intr_post(v, intack);
     }

     /* Is there another IRQ to queue up behind this one? */

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -686,6 +686,7 @@
     struct domain *d = current->domain;
     /* Optionally shift out of the way of Viridian architectural leaves. */
     uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
+    unsigned int tmp_eax, tmp_ebx, tmp_ecx, tmp_edx;

     idx -= base;
     if ( idx > 3 )
@@ -716,6 +717,14 @@
         *edx = 0;          /* Features 2 */
         if ( !is_hvm_vcpu(current) )
             *ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
+
+        /* Check whether an additional feature was specified, e.g. hybrid */
+        if ( !is_viridian_domain(d) )
+        {
+            domain_cpuid(d, 0x40000002, 0,
+                         &tmp_eax, &tmp_ebx, &tmp_ecx, &tmp_edx);
+            if ( tmp_edx != 0 )
+                *edx = tmp_edx & XEN_CPUID_FEAT2_MASK;
+        }
         break;

     case 3:

diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -33,7 +33,8 @@
     hvm_intsrc_pic,
     hvm_intsrc_lapic,
     hvm_intsrc_nmi,
-    hvm_intsrc_mce
+    hvm_intsrc_mce,
+    hvm_intsrc_vector,
 };
 struct hvm_intack {
     uint8_t source;         /* enum hvm_intsrc */
@@ -44,6 +45,7 @@
 #define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
 #define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
 #define hvm_intack_mce        ( (struct hvm_intack) { hvm_intsrc_mce,  18 } )
+#define hvm_intack_vector(vec) ( (struct hvm_intack) { hvm_intsrc_vector, vec } )
 enum hvm_intblk {
     hvm_intblk_none,      /* not blocked (deliverable) */
     hvm_intblk_shadow,    /* MOV-SS or STI shadow */

diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -54,12 +54,14 @@
         enum {
             HVMIRQ_callback_none,
             HVMIRQ_callback_gsi,
-            HVMIRQ_callback_pci_intx
+            HVMIRQ_callback_pci_intx,
+            HVMIRQ_callback_vector,
         } callback_via_type;
     };
     union {
         uint32_t gsi;
         struct { uint8_t dev, intx; } pci;
+        uint32_t vector;
    } callback_via;

     /* Number of INTx wires asserting each PCI-ISA link. */
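To actually receive upcalls through the new path, the guest points
HVM_PARAM_CALLBACK_IRQ at a bare vector. Given the decoding in irq.c above
(via_type = (uint8_t)(via >> 56) + 1), HVMIRQ_callback_vector corresponds to
type 2 in the top byte. A sketch (the CALLBACK_VIA_TYPE_VECTOR name is made
up here for readability):

#define CALLBACK_VIA_TYPE_VECTOR 2ULL   /* (2 + 1) == HVMIRQ_callback_vector */

/* Sketch: deliver event channel upcalls through IDT vector 'vector'. */
static int set_callback_vector(uint8_t vector)
{
    struct xen_hvm_param a = {
        .domid = DOMID_SELF,
        .index = HVM_PARAM_CALLBACK_IRQ,
        .value = (CALLBACK_VIA_TYPE_VECTOR << 56) | vector,
    };

    return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}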
diff --git a/xen/include/public/arch-x86/cpuid.h b/xen/include/public/arch-x86/cpuid.h
--- a/xen/include/public/arch-x86/cpuid.h
+++ b/xen/include/public/arch-x86/cpuid.h
@@ -65,4 +65,11 @@
 #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD  (1u<<0)

+/* Mask of the feature bits a user may specify; unsupported bits are masked off */
+#define XEN_CPUID_FEAT2_MASK 0x3ul
+#define _XEN_CPUID_FEAT2_HVM_PV        0
+#define XEN_CPUID_FEAT2_HVM_PV         (1u<<0)
+#define _XEN_CPUID_FEAT2_HVM_PV_EVTCHN 1
+#define XEN_CPUID_FEAT2_HVM_PV_EVTCHN  (1u<<1)
+
 #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */

diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -127,6 +127,12 @@
 typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);

+#define HVMOP_enable_pv 9
+struct xen_hvm_pv_type {
+    domid_t domid;
+    uint32_t flags;
+#define HVM_PV_EVTCHN (1u<<1)
+};

 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -158,7 +158,12 @@
 #define VIRQ_ARCH_6    22
 #define VIRQ_ARCH_7    23

-#define NR_VIRQS       24
+#define VIRQ_EMUL_PIN_START 24
+#define VIRQ_EMUL_PIN_END   39
+#define VIRQ_EMUL_PIN_NUM   16
+#define VIRQ_EMUL_PIN(x)    (VIRQ_EMUL_PIN_START + (x))
+
+#define NR_VIRQS       40

 /*
  * HYPERVISOR_mmu_update(reqs, count, pdone, foreigndom)

diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -309,6 +309,9 @@
     /* Memory paging support */
     struct mem_event_domain mem_event;
+
+#define XEN_HVM_PV_EVTCHN_ENABLED (1u << 1)
+    uint64_t hvm_pv_enabled;
 };

 struct domain_setup_info
@@ -595,6 +598,9 @@
 #define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
 #define need_iommu(d)    ((d)->need_iommu)

+#define is_hvm_pv_evtchn_domain(d) ((d)->hvm_pv_enabled & XEN_HVM_PV_EVTCHN_ENABLED)
+#define is_hvm_pv_evtchn_vcpu(v)   (is_hvm_pv_evtchn_domain((v)->domain))
+
 void set_vcpu_migration_delay(unsigned int delay);
 unsigned int get_vcpu_migration_delay(void);
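Finally, a guest can probe for the feature before trying to enable it, via
the edx bits that the traps.c hunk exposes in leaf base+2 (0x40000002 for
non-Viridian domains). A self-contained sketch, assuming the Xen base leaf
has already been located by the usual signature scan:

#include <stdint.h>

static inline void xen_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    asm volatile ( "cpuid"
                   : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                   : "0" (leaf) );
}

/* Returns non-zero if the PV evtchn extension is advertised. */
static int hvm_pv_evtchn_present(uint32_t base)
{
    uint32_t eax, ebx, ecx, edx;

    xen_cpuid(base + 2, &eax, &ebx, &ecx, &edx);
    return !!(edx & XEN_CPUID_FEAT2_HVM_PV_EVTCHN);
}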