From: George Dunlap
Subject: [PATCH RFC v13 02/20] pvh prep: code motion
Date: Mon, 23 Sep 2013 17:49:42 +0100
Message-ID: <1379955000-11050-3-git-send-email-george.dunlap@eu.citrix.com>
In-Reply-To: <1379955000-11050-1-git-send-email-george.dunlap@eu.citrix.com>
References: <1379955000-11050-1-git-send-email-george.dunlap@eu.citrix.com>
To: xen-devel@lists.xen.org
Cc: George Dunlap, Keir Fraser, Tim Deegan, Jan Beulich
List-Id: xen-devel@lists.xenproject.org

There are many functions where PVH requires some code in common with
HVM.  Rearrange some of these functions so that the shared code is
together.

In general, the HVM code that PVH also uses includes:
 - cacheattr functionality
 - paging
 - hvm_funcs
 - hvm_assert_evtchn_irq tasklet
 - tm_list
 - hvm_params

And code that PVH shares with PV but not with HVM:
 - updating the domain wallclock
 - setting v->is_initialised

There should be no end-to-end changes in behavior.

Signed-off-by: George Dunlap
Signed-off-by: Mukesh Rathor
---
v13:
 - Don't bother calling tasklet_kill in the failure path of
   hvm_vcpu_initialise
 - Allocate hvm_params for PVH domains
CC: Jan Beulich
CC: Tim Deegan
CC: Keir Fraser
---
 xen/arch/x86/domain.c  | 11 +++---
 xen/arch/x86/hvm/hvm.c | 92 +++++++++++++++++++++++++-----------------------
 2 files changed, 53 insertions(+), 50 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 316ef04..5ebc68c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -945,17 +945,16 @@ int arch_set_info_guest(
 
     clear_bit(_VPF_in_reset, &v->pause_flags);
 
-    if ( v->vcpu_id == 0 )
-        update_domain_wallclock_time(d);
-
-    /* Don't redo final setup */
-    v->is_initialised = 1;
-
     if ( paging_mode_enabled(d) )
         paging_update_paging_modes(v);
 
     update_cr3(v);
 
+    if ( v->vcpu_id == 0 )
+        update_domain_wallclock_time(d);
+
+    /* Don't redo final setup */
+    v->is_initialised = 1;
 
  out:
     if ( flags & VGCF_online )
         clear_bit(_VPF_down, &v->pause_flags);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e0e0f5d..aaf956a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -522,27 +522,27 @@ int hvm_domain_initialise(struct domain *d)
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
 
-    INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
-    spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
+    hvm_init_cacheattr_region_list(d);
+
+    rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
+    if ( rc != 0 )
+        goto fail0;
 
     d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
     d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
     rc = -ENOMEM;
     if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
-        goto fail0;
+        goto fail1;
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
+    INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
+    spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
+
     hvm_init_guest_time(d);
 
     d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
     d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
 
-    hvm_init_cacheattr_region_list(d);
-
-    rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
-    if ( rc != 0 )
-        goto fail1;
-
     vpic_init(d);
 
     rc = vioapic_init(d);
@@ -569,10 +569,10 @@ int hvm_domain_initialise(struct domain *d)
     stdvga_deinit(d);
     vioapic_deinit(d);
  fail1:
-    hvm_destroy_cacheattr_region_list(d);
- fail0:
     xfree(d->arch.hvm_domain.io_handler);
     xfree(d->arch.hvm_domain.params);
+ fail0:
+    hvm_destroy_cacheattr_region_list(d);
     return rc;
 }
 
@@ -601,11 +601,11 @@ void hvm_domain_relinquish_resources(struct domain *d)
 
 void hvm_domain_destroy(struct domain *d)
 {
+    hvm_destroy_cacheattr_region_list(d);
     hvm_funcs.domain_destroy(d);
     rtc_deinit(d);
     stdvga_deinit(d);
     vioapic_deinit(d);
-    hvm_destroy_cacheattr_region_list(d);
 }
 
 static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
@@ -1091,24 +1091,46 @@ int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
     struct domain *d = v->domain;
-    domid_t dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
+    domid_t dm_domid;
 
     hvm_asid_flush_vcpu(v);
 
-    if ( (rc = vlapic_init(v)) != 0 )
+    spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
+    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
+
+    rc = hvm_vcpu_cacheattr_init(v); /* teardown: hvm_vcpu_cacheattr_destroy */
+    if ( rc != 0 )
         goto fail1;
 
-    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
+    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
         goto fail2;
 
-    if ( nestedhvm_enabled(d)
-         && (rc = nestedhvm_vcpu_initialise(v)) < 0 )
+    softirq_tasklet_init(
+        &v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
+        (void(*)(unsigned long))hvm_assert_evtchn_irq,
+        (unsigned long)v);
+
+    v->arch.user_regs.eflags = 2;
+
+    v->arch.hvm_vcpu.inject_trap.vector = -1;
+
+    rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
+    if ( rc != 0 )
         goto fail3;
 
+    if ( (rc = vlapic_init(v)) != 0 ) /* teardown: vlapic_destroy */
+        goto fail4;
+
+    if ( nestedhvm_enabled(d)
+         && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
+        goto fail5;
+
+    dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
+
     /* Create ioreq event channel. */
-    rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL);
+    rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
     if ( rc < 0 )
-        goto fail4;
+        goto fail6;
 
     /* Register ioreq event channel. */
     v->arch.hvm_vcpu.xen_port = rc;
@@ -1116,9 +1138,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( v->vcpu_id == 0 )
     {
         /* Create bufioreq event channel. */
-        rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL);
+        rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
         if ( rc < 0 )
-            goto fail4;
+            goto fail6;
 
         d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = rc;
     }
@@ -1127,26 +1149,6 @@ int hvm_vcpu_initialise(struct vcpu *v)
     get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
     spin_unlock(&d->arch.hvm_domain.ioreq.lock);
 
-    spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
-    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
-
-    v->arch.hvm_vcpu.inject_trap.vector = -1;
-
-    rc = setup_compat_arg_xlat(v);
-    if ( rc != 0 )
-        goto fail4;
-
-    rc = hvm_vcpu_cacheattr_init(v);
-    if ( rc != 0 )
-        goto fail5;
-
-    softirq_tasklet_init(
-        &v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
-        (void(*)(unsigned long))hvm_assert_evtchn_irq,
-        (unsigned long)v);
-
-    v->arch.user_regs.eflags = 2;
-
     if ( v->vcpu_id == 0 )
     {
         /* NB. All these really belong in hvm_domain_initialise(). */
@@ -1164,14 +1166,16 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     return 0;
 
+ fail6:
+    nestedhvm_vcpu_destroy(v);
  fail5:
-    free_compat_arg_xlat(v);
+    vlapic_destroy(v);
  fail4:
-    nestedhvm_vcpu_destroy(v);
+    free_compat_arg_xlat(v);
  fail3:
     hvm_funcs.vcpu_destroy(v);
  fail2:
-    vlapic_destroy(v);
+    hvm_vcpu_cacheattr_destroy(v);
  fail1:
     return rc;
 }
-- 
1.7.9.5
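
The reshuffled fail1..fail6 labels above follow the usual goto-unwind
idiom: each label releases exactly what was set up before the
corresponding goto, in strict reverse order of initialisation, so a new
init step only needs a new label at the top of the unwind chain.  Below
is a minimal standalone sketch of that idiom; the names are made up and
plain malloc()/free() stand in for the real init/teardown pairs, so it
is not the Xen API, just an illustration of the pattern.

    /*
     * Sketch of the goto-unwind error-handling idiom used by the
     * reordered hvm_vcpu_initialise().  Hypothetical names; plain
     * malloc()/free() stand in for the real init/teardown pairs.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_vcpu {
        void *cacheattr;   /* stands in for hvm_vcpu_cacheattr_init() state */
        void *arg_xlat;    /* stands in for setup_compat_arg_xlat() state   */
        void *vlapic;      /* stands in for vlapic_init() state             */
    };

    static int fake_vcpu_init(struct fake_vcpu *v)
    {
        int rc = -12;  /* -ENOMEM */

        if ( (v->cacheattr = malloc(16)) == NULL )   /* teardown: free() */
            goto fail1;

        if ( (v->arg_xlat = malloc(16)) == NULL )    /* teardown: free() */
            goto fail2;

        if ( (v->vlapic = malloc(16)) == NULL )      /* teardown: free() */
            goto fail3;

        return 0;

        /* Each label undoes only what was set up before its goto,
         * in reverse order of initialisation. */
     fail3:
        free(v->arg_xlat);
     fail2:
        free(v->cacheattr);
     fail1:
        return rc;
    }

    int main(void)
    {
        struct fake_vcpu v = { 0 };
        printf("fake_vcpu_init: %d\n", fake_vcpu_init(&v));
        return 0;
    }

Keeping the teardown chain a mirror image of the setup order is the
discipline the patch restores in hvm_vcpu_initialise() after moving the
PVH-shared initialisation earlier in the function.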