From: "Jan Beulich"
To: "xen-devel@lists.xensource.com"
Subject: [PATCH 4/6] x86: split struct domain
Date: Tue, 05 Apr 2011 09:21:50 +0100
Message-ID: <4D9AED3E0200007800039F36@vpn.id2.novell.com>

This is accomplished by converting a couple of embedded arrays (in one
case a structure containing an array) into separately allocated
pointers, and (just as for struct arch_vcpu in a prior patch) overlaying
some PV-only fields with HVM-only ones.

One particularly noteworthy change in the opposite direction is that of
PITState - this field so far lived in the HVM-only portion, but is being
used by PV guests too, and hence needed to be moved out of struct
hvm_domain.

The change to XENMEM_set_memory_map (and hence libxl__build_pre() and
the movement of the E820-related pieces to struct pv_domain) is subject
to a positive response to a query sent to xen-devel regarding the need
for this to happen for HVM guests (see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg01848.html).

The protection of arch.hvm_domain.irq.dpci accesses by is_hvm_domain()
is subject to confirmation that the field is used for HVM guests only (see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg02004.html).

In the absence of any reply to these queries, and given the early state
of 4.2 development, I think it should be acceptable to take the risk of
having to later undo/redo some of this.

Signed-off-by: Jan Beulich
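To make the layout change easier to see before wading into the diff,
here is a minimal standalone sketch of the two transformations. The
names (struct dom, pv_state, hvm_state, cpuid_table, dom_create) are
simplified stand-ins, not the actual Xen definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct pv_state  { unsigned long *pirq_eoi_map; };   /* PV-only fields  */
struct hvm_state { char *pbuf; uint64_t *params; };  /* HVM-only fields */

struct dom {
    int is_hvm;                 /* discriminator, like is_hvm_domain() */
    union {                     /* (1) PV/HVM state shares storage;    */
        struct pv_state  pv;    /*     only one arm is ever live       */
        struct hvm_state hvm;
    };
    uint32_t *cpuid_table;      /* (2) formerly a big embedded array,  */
};                              /*     now separately allocated        */

static struct dom *dom_create(int is_hvm, size_t nr_entries)
{
    struct dom *d = calloc(1, sizeof(*d));

    if ( !d )
        return NULL;
    d->is_hvm = is_hvm;
    d->cpuid_table = calloc(nr_entries, sizeof(*d->cpuid_table));
    if ( !d->cpuid_table )      /* mirrors the patch's -ENOMEM handling */
    {
        free(d);
        return NULL;
    }
    return d;
}

int main(void)
{
    struct dom *d = dom_create(0, 64);

    if ( !d )
        return 1;
    /* Touching d->pv is valid only because is_hvm == 0. */
    printf("struct dom is %zu bytes; pv map at %p\n",
           sizeof(*d), (void *)d->pv.pirq_eoi_map);
    free(d->cpuid_table);
    free(d);
    return 0;
}

Once the two arms overlay each other, every access to a mode-specific
field must be gated on the domain type, which is why the hunks below add
is_hvm_domain() checks in front of the pirq_eoi_map and dpci accesses.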
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -72,9 +72,9 @@ int libxl__build_pre(libxl__gc *gc, uint
     libxl_ctx *ctx = libxl__gc_owner(gc);
     xc_domain_max_vcpus(ctx->xch, domid, info->max_vcpus);
     xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + LIBXL_MAXMEM_CONSTANT);
-    xc_domain_set_memmap_limit(ctx->xch, domid,
-        (info->hvm) ? info->max_memkb :
-        (info->max_memkb + info->u.pv.slack_memkb));
+    if (!info->hvm)
+        xc_domain_set_memmap_limit(ctx->xch, domid,
+            (info->max_memkb + info->u.pv.slack_memkb));
     xc_domain_set_tsc_info(ctx->xch, domid, info->tsc_mode, 0, 0, 0);
     if ( info->disable_migrate )
         xc_domain_disable_migrate(ctx->xch, domid);
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -187,16 +187,17 @@ struct domain *alloc_domain_struct(void)
 #ifdef __x86_64__
     bits += pfn_pdx_hole_shift;
 #endif
-    d = alloc_xenheap_pages(get_order_from_bytes(sizeof(*d)), MEMF_bits(bits));
+    BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
+    d = alloc_xenheap_pages(0, MEMF_bits(bits));
     if ( d != NULL )
-        memset(d, 0, sizeof(*d));
+        clear_page(d);
     return d;
 }
 
 void free_domain_struct(struct domain *d)
 {
     lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM, d);
-    free_xenheap_pages(d, get_order_from_bytes(sizeof(*d)));
+    free_xenheap_page(d);
 }
 
 struct vcpu *alloc_vcpu_struct(void)
@@ -531,6 +532,17 @@ int arch_domain_create(struct domain *d,
 
     if ( !is_idle_domain(d) )
     {
+        d->arch.cpuids = xmalloc_array(cpuid_input_t, MAX_CPUID_INPUT);
+        rc = -ENOMEM;
+        if ( d->arch.cpuids == NULL )
+            goto fail;
+        memset(d->arch.cpuids, 0, MAX_CPUID_INPUT * sizeof(*d->arch.cpuids));
+        for ( i = 0; i < MAX_CPUID_INPUT; i++ )
+        {
+            d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
+            d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
+        }
+
         d->arch.ioport_caps =
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         rc = -ENOMEM;
@@ -599,13 +611,6 @@ int arch_domain_create(struct domain *d,
             (CONFIG_PAGING_LEVELS != 4);
     }
 
-    memset(d->arch.cpuids, 0, sizeof(d->arch.cpuids));
-    for ( i = 0; i < MAX_CPUID_INPUT; i++ )
-    {
-        d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
-        d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
-    }
-
     /* initialize default tsc behavior in case tools don't */
     tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
     spin_lock_init(&d->arch.vtsc_lock);
@@ -2067,11 +2072,12 @@ int domain_relinquish_resources(struct d
                 unmap_vcpu_info(v);
         }
 
-        if ( d->arch.pirq_eoi_map != NULL )
+        if ( d->arch.pv_domain.pirq_eoi_map != NULL )
         {
-            unmap_domain_page_global(d->arch.pirq_eoi_map);
-            put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
-            d->arch.pirq_eoi_map = NULL;
+            unmap_domain_page_global(d->arch.pv_domain.pirq_eoi_map);
+            put_page_and_type(
+                mfn_to_page(d->arch.pv_domain.pirq_eoi_map_mfn));
+            d->arch.pv_domain.pirq_eoi_map = NULL;
         }
     }
 
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -900,6 +900,10 @@ long arch_do_domctl(
             break;
         bind = &(domctl->u.bind_pt_irq);
 
+        ret = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto bind_out;
+
         ret = xsm_bind_pt_irq(d, bind);
         if ( ret )
             goto bind_out;
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -237,8 +237,7 @@ static void hpet_set_timer(HPETState *h,
     {
         /* HPET specification requires PIT shouldn't generate
          * interrupts if LegacyReplacementRoute is set for timer0 */
-        PITState *pit = &vhpet_domain(h)->arch.hvm_domain.pl_time.vpit;
-        pit_stop_channel0_irq(pit);
+        pit_stop_channel0_irq(&vhpet_domain(h)->arch.vpit);
     }
 
     if ( !timer_enabled(h, tn) )
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -412,7 +412,7 @@ static int hvm_print_line(
 
     spin_lock(&hd->pbuf_lock);
     hd->pbuf[hd->pbuf_idx++] = c;
-    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
+    if ( (hd->pbuf_idx == (HVM_PBUF_SIZE - 2)) || (c == '\n') )
     {
         if ( c != '\n' )
             hd->pbuf[hd->pbuf_idx++] = '\n';
@@ -443,6 +443,19 @@ int hvm_domain_initialise(struct domain 
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
 
+    d->arch.hvm_domain.pbuf = xmalloc_array(char, HVM_PBUF_SIZE);
+    d->arch.hvm_domain.params = xmalloc_array(uint64_t, HVM_NR_PARAMS);
+    d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
+    rc = -ENOMEM;
+    if ( !d->arch.hvm_domain.pbuf || !d->arch.hvm_domain.params ||
+         !d->arch.hvm_domain.io_handler )
+        goto fail0;
+    memset(d->arch.hvm_domain.pbuf, 0,
+           HVM_PBUF_SIZE * sizeof(*d->arch.hvm_domain.pbuf));
+    memset(d->arch.hvm_domain.params, 0,
+           HVM_NR_PARAMS * sizeof(*d->arch.hvm_domain.params));
+    d->arch.hvm_domain.io_handler->num_slot = 0;
+
     hvm_init_guest_time(d);
 
     d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
@@ -480,6 +493,10 @@ int hvm_domain_initialise(struct domain 
     vioapic_deinit(d);
  fail1:
     hvm_destroy_cacheattr_region_list(d);
+ fail0:
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+    xfree(d->arch.hvm_domain.pbuf);
     return rc;
 }
 
@@ -500,6 +517,10 @@ void hvm_domain_relinquish_resources(str
         pmtimer_deinit(d);
         hpet_deinit(d);
     }
+
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+    xfree(d->arch.hvm_domain.pbuf);
 }
 
 void hvm_domain_destroy(struct domain *d)
@@ -2533,10 +2554,20 @@ static long hvm_grant_table_op(
 
 static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
-    long rc = do_memory_op(cmd, arg);
-    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
+    long rc;
+
+    switch ( cmd & MEMOP_CMD_MASK )
+    {
+    case XENMEM_memory_map:
+    case XENMEM_machine_memory_map:
+    case XENMEM_machphys_mapping:
+        return -ENOSYS;
+    case XENMEM_decrease_reservation:
+        rc = do_memory_op(cmd, arg);
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-    return rc;
+        return rc;
+    }
+    return do_memory_op(cmd, arg);
 }
 
 static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
@@ -2613,10 +2644,20 @@ static long hvm_grant_table_op_compat32(
 
 static long hvm_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
-    long rc = compat_memory_op(cmd, arg);
-    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
+    int rc;
+
+    switch ( cmd & MEMOP_CMD_MASK )
+    {
+    case XENMEM_memory_map:
+    case XENMEM_machine_memory_map:
+    case XENMEM_machphys_mapping:
+        return -ENOSYS;
+    case XENMEM_decrease_reservation:
+        rc = compat_memory_op(cmd, arg);
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-    return rc;
+        return rc;
+    }
+    return compat_memory_op(cmd, arg);
 }
 
 static long hvm_vcpu_op_compat32(
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -38,10 +38,9 @@
 #include
 #include
 
-#define domain_vpit(x) (&(x)->arch.hvm_domain.pl_time.vpit)
+#define domain_vpit(x) (&(x)->arch.vpit)
 #define vcpu_vpit(x)   (domain_vpit((x)->domain))
-#define vpit_domain(x) (container_of((x), struct domain, \
-                                     arch.hvm_domain.pl_time.vpit))
+#define vpit_domain(x) (container_of((x), struct domain, arch.vpit))
 #define vpit_vcpu(x)   (pt_global_vcpu_target(vpit_domain(x)))
 
 #define RW_STATE_LSB 1
@@ -450,14 +449,18 @@ void pit_reset(struct domain *d)
 
 void pit_init(struct vcpu *v, unsigned long cpu_khz)
 {
-    PITState *pit = vcpu_vpit(v);
+    struct domain *d = v->domain;
+    PITState *pit = domain_vpit(d);
 
     spin_lock_init(&pit->lock);
 
-    register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
-    register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
+    if ( is_hvm_domain(d) )
+    {
+        register_portio_handler(d, PIT_BASE, 4, handle_pit_io);
+        register_portio_handler(d, 0x61, 1, handle_speaker_io);
+    }
 
-    pit_reset(v->domain);
+    pit_reset(d);
 }
 
 void pit_deinit(struct domain *d)
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -195,8 +195,7 @@ static int process_portio_intercept(port
 int hvm_io_intercept(ioreq_t *p, int type)
 {
     struct vcpu *v = current;
-    struct hvm_io_handler *handler =
-        &v->domain->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = v->domain->arch.hvm_domain.io_handler;
     int i;
     unsigned long addr, size;
 
@@ -230,7 +229,7 @@ void register_io_handler(
     struct domain *d, unsigned long addr, unsigned long size,
     void *action, int type)
 {
-    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
     int num = handler->num_slot;
 
     BUG_ON(num >= MAX_IO_HANDLER);
@@ -246,7 +245,7 @@ void relocate_io_handler(
     struct domain *d, unsigned long old_addr, unsigned long new_addr,
     unsigned long size, int type)
 {
-    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
+    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
     int i;
 
     for ( i = 0; i < handler->num_slot; i++ )
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -272,8 +272,7 @@ static void ioapic_inj_irq(
 
 static inline int pit_channel0_enabled(void)
 {
-    PITState *pit = &current->domain->arch.hvm_domain.pl_time.vpit;
-    return pt_active(&pit->pt0);
+    return pt_active(&current->domain->arch.vpit.pt0);
 }
 
 static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -463,17 +463,20 @@ static void pt_adjust_vcpu(struct period
 
 void pt_adjust_global_vcpu_target(struct vcpu *v)
 {
+    struct PITState *vpit;
     struct pl_time *pl_time;
    int i;
 
     if ( v == NULL )
         return;
 
-    pl_time = &v->domain->arch.hvm_domain.pl_time;
+    vpit = &v->domain->arch.vpit;
+
+    spin_lock(&vpit->lock);
+    pt_adjust_vcpu(&vpit->pt0, v);
+    spin_unlock(&vpit->lock);
 
-    spin_lock(&pl_time->vpit.lock);
-    pt_adjust_vcpu(&pl_time->vpit.pt0, v);
-    spin_unlock(&pl_time->vpit.lock);
+    pl_time = &v->domain->arch.hvm_domain.pl_time;
 
     spin_lock(&pl_time->vrtc.lock);
     pt_adjust_vcpu(&pl_time->vrtc.pt, v);
@@ -507,7 +510,7 @@ void pt_may_unmask_irq(struct domain *d,
 
     if ( d )
     {
-        pt_resume(&d->arch.hvm_domain.pl_time.vpit.pt0);
+        pt_resume(&d->arch.vpit.pt0);
         pt_resume(&d->arch.hvm_domain.pl_time.vrtc.pt);
         for ( i = 0; i < HPET_TIMER_NUM; i++ )
             pt_resume(&d->arch.hvm_domain.pl_time.vhpet.pt[i]);
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -764,14 +764,14 @@ bool_t cpu_has_pending_apic_eoi(void)
 
 static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
 {
-    if ( d->arch.pirq_eoi_map )
-        set_bit(irq, d->arch.pirq_eoi_map);
+    if ( !is_hvm_domain(d) && d->arch.pv_domain.pirq_eoi_map )
+        set_bit(irq, d->arch.pv_domain.pirq_eoi_map);
 }
 
 static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
 {
-    if ( d->arch.pirq_eoi_map )
-        clear_bit(irq, d->arch.pirq_eoi_map);
+    if ( !is_hvm_domain(d) && d->arch.pv_domain.pirq_eoi_map )
+        clear_bit(irq, d->arch.pv_domain.pirq_eoi_map);
 }
 
 static void _irq_guest_eoi(struct irq_desc *desc)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4710,7 +4710,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
         if ( copy_from_guest(&fmap, arg, 1) )
             return -EFAULT;
 
-        if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.e820) )
+        if ( fmap.map.nr_entries > ARRAY_SIZE(d->arch.pv_domain.e820) )
             return -EINVAL;
 
         rc = rcu_lock_target_domain_by_id(fmap.domid, &d);
@@ -4724,9 +4724,15 @@ long arch_memory_op(int op, XEN_GUEST_HA
             return rc;
         }
 
-        rc = copy_from_guest(d->arch.e820, fmap.map.buffer,
+        if ( is_hvm_domain(d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
+
+        rc = copy_from_guest(d->arch.pv_domain.e820, fmap.map.buffer,
                              fmap.map.nr_entries) ? -EFAULT : 0;
-        d->arch.nr_e820 = fmap.map.nr_entries;
+        d->arch.pv_domain.nr_e820 = fmap.map.nr_entries;
 
         rcu_unlock_domain(d);
         return rc;
@@ -4738,14 +4744,15 @@ long arch_memory_op(int op, XEN_GUEST_HA
         struct domain *d = current->domain;
 
         /* Backwards compatibility. */
-        if ( d->arch.nr_e820 == 0 )
+        if ( d->arch.pv_domain.nr_e820 == 0 )
             return -ENOSYS;
 
         if ( copy_from_guest(&map, arg, 1) )
             return -EFAULT;
 
-        map.nr_entries = min(map.nr_entries, d->arch.nr_e820);
-        if ( copy_to_guest(map.buffer, d->arch.e820, map.nr_entries) ||
+        map.nr_entries = min(map.nr_entries, d->arch.pv_domain.nr_e820);
+        if ( copy_to_guest(map.buffer, d->arch.pv_domain.e820,
+                           map.nr_entries) ||
             copy_to_guest(arg, &map, 1) )
             return -EFAULT;
 
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -264,7 +264,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         ret = -EINVAL;
         if ( eoi.irq >= v->domain->nr_pirqs )
             break;
-        if ( v->domain->arch.pirq_eoi_map )
+        if ( !is_hvm_domain(v->domain) &&
+             v->domain->arch.pv_domain.pirq_eoi_map )
             evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
         if ( !is_hvm_domain(v->domain) ||
             domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
@@ -289,17 +290,18 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
                                         PGT_writable_page) )
             break;
 
-        if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
+        if ( cmpxchg(&v->domain->arch.pv_domain.pirq_eoi_map_mfn,
+                     0, mfn) != 0 )
         {
             put_page_and_type(mfn_to_page(mfn));
             ret = -EBUSY;
             break;
         }
 
-        v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
-        if ( v->domain->arch.pirq_eoi_map == NULL )
+        v->domain->arch.pv_domain.pirq_eoi_map = map_domain_page_global(mfn);
+        if ( v->domain->arch.pv_domain.pirq_eoi_map == NULL )
         {
-            v->domain->arch.pirq_eoi_map_mfn = 0;
+            v->domain->arch.pv_domain.pirq_eoi_map_mfn = 0;
             put_page_and_type(mfn_to_page(mfn));
             ret = -ENOSPC;
             break;
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -85,6 +85,14 @@ static void pt_irq_time_out(void *data)
     }
 }
 
+struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *d)
+{
+    if ( !d || !is_hvm_domain(d) )
+        return NULL;
+
+    return d->arch.hvm_domain.irq.dpci;
+}
+
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
 {
     xfree(dpci->mirq);
@@ -150,12 +158,7 @@ int pt_irq_create_bind_vtd(
         for ( int i = 0; i < NR_HVM_IRQS; i++ )
             INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);
 
-        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
-        {
-            spin_unlock(&d->event_lock);
-            free_hvm_irq_dpci(hvm_irq_dpci);
-            return -EINVAL;
-        }
+        d->arch.hvm_domain.irq.dpci = hvm_irq_dpci;
     }
 
     if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
@@ -392,8 +395,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
 
     ASSERT(spin_is_locked(&irq_desc[domain_pirq_to_irq(d, mirq)].lock));
-    if ( !iommu_enabled || (d == dom0) || !dpci ||
-         !test_bit(mirq, dpci->mapping))
+    if ( !iommu_enabled || !dpci || !test_bit(mirq, dpci->mapping))
         return 0;
 
     set_bit(mirq, dpci->dirq_mask);
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c
@@ -70,23 +70,6 @@ void *__init map_to_nocache_virt(int nr_
     return (void *) ( maddr + __IA64_UNCACHED_OFFSET);
 }
 
-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
-{
-    if ( !domain )
-        return NULL;
-
-    return domain->arch.hvm_domain.irq.dpci;
-}
-
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
-{
-    if ( !domain || !dpci )
-        return 0;
-
-    domain->arch.hvm_domain.irq.dpci = dpci;
-    return 1;
-}
-
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
 {
     /* dummy */
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -68,23 +68,6 @@ void *__init map_to_nocache_virt(int nr_
     return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
 }
 
-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
-{
-    if ( !domain )
-        return NULL;
-
-    return domain->arch.hvm_domain.irq.dpci;
-}
-
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
-{
-    if ( !domain || !dpci )
-        return 0;
-
-    domain->arch.hvm_domain.irq.dpci = dpci;
-    return 1;
-}
-
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -231,6 +231,17 @@ struct time_scale {
     u32 mul_frac;
 };
 
+struct pv_domain
+{
+    /* Shared page for notifying that explicit PIRQ EOI is required. */
+    unsigned long *pirq_eoi_map;
+    unsigned long pirq_eoi_map_mfn;
+
+    /* Pseudophysical e820 map (XENMEM_memory_map). */
+    struct e820entry e820[3];
+    unsigned int nr_e820;
+};
+
 struct arch_domain
 {
 #ifdef CONFIG_X86_64
@@ -253,7 +264,11 @@ struct arch_domain
     uint32_t pci_cf8;
 
     struct list_head pdev_list;
-    struct hvm_domain hvm_domain;
+
+    union {
+        struct pv_domain pv_domain;
+        struct hvm_domain hvm_domain;
+    };
 
     struct paging_domain paging;
     struct p2m_domain *p2m;
@@ -265,14 +280,6 @@ struct arch_domain
     int *emuirq_pirq;
     int *pirq_emuirq;
 
-    /* Shared page for notifying that explicit PIRQ EOI is required. */
-    unsigned long *pirq_eoi_map;
-    unsigned long pirq_eoi_map_mfn;
-
-    /* Pseudophysical e820 map (XENMEM_memory_map). */
-    struct e820entry e820[3];
-    unsigned int nr_e820;
-
     /* Maximum physical-address bitwidth supported by this guest. */
     unsigned int physaddr_bitsize;
 
@@ -294,7 +301,9 @@ struct arch_domain
     } relmem;
     struct page_list_head relmem_list;
 
-    cpuid_input_t cpuids[MAX_CPUID_INPUT];
+    cpuid_input_t *cpuids;
+
+    struct PITState vpit;
 
     /* For Guest vMCA handling */
     struct domain_mca_msrs *vmca_msrs;
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -47,7 +47,7 @@ struct hvm_domain {
 
     struct pl_time         pl_time;
 
-    struct hvm_io_handler  io_handler;
+    struct hvm_io_handler *io_handler;
 
     /* Lock protects access to irq, vpic and vioapic. */
     spinlock_t             irq_lock;
@@ -60,11 +60,12 @@ struct hvm_domain {
     struct vcpu           *i8259_target;
 
     /* hvm_print_line() logging. */
-    char                   pbuf[80];
+#define HVM_PBUF_SIZE 80
+    char                  *pbuf;
     int                    pbuf_idx;
     spinlock_t             pbuf_lock;
 
-    uint64_t               params[HVM_NR_PARAMS];
+    uint64_t              *params;
 
     /* Memory ranges with pinned cache attributes. */
     struct list_head       pinned_cacheattr_ranges;
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -124,7 +124,6 @@ typedef struct PMTState {
 } PMTState;
 
 struct pl_time {    /* platform time */
-    struct PITState  vpit;
     struct RTCState  vrtc;
     struct HPETState vhpet;
     struct PMTState  vpmt;
@@ -143,7 +142,9 @@ void pt_migrate(struct vcpu *v);
 
 void pt_adjust_global_vcpu_target(struct vcpu *v);
 #define pt_global_vcpu_target(d) \
-    ((d)->arch.hvm_domain.i8259_target ? : (d)->vcpu ? (d)->vcpu[0] : NULL)
+    (is_hvm_domain(d) && (d)->arch.hvm_domain.i8259_target ? \
+     (d)->arch.hvm_domain.i8259_target : \
+     (d)->vcpu ? (d)->vcpu[0] : NULL)
 
 void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt);
 
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -106,8 +106,7 @@ struct qi_ctrl *iommu_qi_ctrl(struct iom
 struct ir_ctrl *iommu_ir_ctrl(struct iommu *iommu);
 struct iommu_flush *iommu_get_flush(struct iommu *iommu);
 void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
-struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain);
-int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci);
+struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
 bool_t pt_irq_need_timer(uint32_t flags);
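As an aside, the single-page allocation in alloc_domain_struct() above
relies on a compile-time size assertion. A standalone stand-in for the
idiom (not the actual Xen macro, and with a made-up structure size for
the demo) looks like this:

#include <stdio.h>

/* If the condition is true, the array size turns negative and the
 * build fails; if false, the expression is harmless. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct dom { char payload[3000]; };   /* hypothetical; must fit one page */

int main(void)
{
    BUILD_BUG_ON(sizeof(struct dom) > 4096);   /* OK: fits in one page */
    puts("struct dom fits in a single 4k page");
    return 0;
}

This is what lets the patch drop the get_order_from_bytes() computation
and use a plain single-page alloc_xenheap_pages(0, ...)/free_xenheap_page()
pair: the assertion guarantees the structure can never silently outgrow
one page.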