From: George Dunlap <george.dunlap@eu.citrix.com>
To: xen-devel@lists.xen.org
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
Keir Fraser <keir@xen.org>, Tim Deegan <tim@xen.org>,
Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v15 03/19] pvh prep: code motion
Date: Mon, 11 Nov 2013 14:57:05 +0000
Message-ID: <1384181841-22739-4-git-send-email-george.dunlap@eu.citrix.com>
In-Reply-To: <1384181841-22739-1-git-send-email-george.dunlap@eu.citrix.com>
Several functions contain code that PVH needs in common with HVM.
Rearrange these functions so that the shared code sits together.
In general, the HVM code that PVH also uses includes:
- cacheattr functionality
- paging
- hvm_funcs
- hvm_assert_evtchn_irq tasklet
- tm_list
- hvm_params
And code that PVH shares with PV but not with HVM:
- updating the domain wallclock
- setting v->is_initialized
There should be no end-to-end changes in behavior; the sketch below
illustrates the structure this is moving towards.
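To illustrate the intent (a hypothetical sketch, not code from this
patch; every name below except struct vcpu is invented), grouping the
shared code lets a later patch gate the HVM-only tail behind a single
guest-type check instead of interleaved conditionals:

    struct vcpu;                            /* opaque for this sketch */
    int shared_hvm_init(struct vcpu *v);    /* invented: the shared prefix */
    int hvm_only_init(struct vcpu *v);      /* invented: the HVM-only tail */
    int is_pvh_vcpu(const struct vcpu *v);  /* stands in for the predicate
                                               introduced later in this series */

    int vcpu_initialise_sketch(struct vcpu *v)
    {
        int rc = shared_hvm_init(v);    /* cacheattr, tm_list, vlapic, ... */

        if ( rc != 0 )
            return rc;

        if ( is_pvh_vcpu(v) )
            return 0;                   /* PVH skips the HVM-only tail */

        return hvm_only_init(v);        /* ioreq/bufioreq event channels, ... */
    }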
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Acked-by: Tim Deegan <tim@xen.org>
---
v15:
- Nuke unnecessary movement.
v14:
- Remove changes in arch_set_info_guest (more of the code is unified)
- hvm_funcs.vcpu_initialise() must be called after vlapic_init()
v13:
- Don't bother calling tasklet_kill in the failure path of
hvm_vcpu_initialise
- Allocate hvm_params for PVH domains
CC: Jan Beulich <jbeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: Keir Fraser <keir@xen.org>
---
xen/arch/x86/hvm/hvm.c | 89 +++++++++++++++++++++++++-----------------------
1 file changed, 47 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 36699b8..85f9857 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -525,11 +525,17 @@ int hvm_domain_initialise(struct domain *d)
INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
+ hvm_init_cacheattr_region_list(d);
+
+ rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
+ if ( rc != 0 )
+ goto fail0;
+
d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
rc = -ENOMEM;
if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
- goto fail0;
+ goto fail1;
d->arch.hvm_domain.io_handler->num_slot = 0;
hvm_init_guest_time(d);
@@ -537,12 +543,6 @@ int hvm_domain_initialise(struct domain *d)
d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
- hvm_init_cacheattr_region_list(d);
-
- rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
- if ( rc != 0 )
- goto fail1;
-
vpic_init(d);
rc = vioapic_init(d);
@@ -569,10 +569,10 @@ int hvm_domain_initialise(struct domain *d)
stdvga_deinit(d);
vioapic_deinit(d);
fail1:
- hvm_destroy_cacheattr_region_list(d);
- fail0:
xfree(d->arch.hvm_domain.io_handler);
xfree(d->arch.hvm_domain.params);
+ fail0:
+ hvm_destroy_cacheattr_region_list(d);
return rc;
}
@@ -601,11 +601,11 @@ void hvm_domain_relinquish_resources(struct domain *d)
void hvm_domain_destroy(struct domain *d)
{
+ hvm_destroy_cacheattr_region_list(d);
hvm_funcs.domain_destroy(d);
rtc_deinit(d);
stdvga_deinit(d);
vioapic_deinit(d);
- hvm_destroy_cacheattr_region_list(d);
}
static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
@@ -1091,24 +1091,47 @@ int hvm_vcpu_initialise(struct vcpu *v)
{
int rc;
struct domain *d = v->domain;
- domid_t dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
+ domid_t dm_domid;
hvm_asid_flush_vcpu(v);
- if ( (rc = vlapic_init(v)) != 0 )
+ spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
+ INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
+
+ rc = hvm_vcpu_cacheattr_init(v); /* teardown: vcpu_cacheattr_destroy */
+ if ( rc != 0 )
goto fail1;
- if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
+ /* NB: vlapic_init must be called before hvm_funcs.vcpu_initialise */
+ if ( (rc = vlapic_init(v)) != 0 ) /* teardown: vlapic_destroy */
goto fail2;
- if ( nestedhvm_enabled(d)
- && (rc = nestedhvm_vcpu_initialise(v)) < 0 )
+ if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
goto fail3;
+ softirq_tasklet_init(
+ &v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
+ (void(*)(unsigned long))hvm_assert_evtchn_irq,
+ (unsigned long)v);
+
+ v->arch.user_regs.eflags = 2;
+
+ v->arch.hvm_vcpu.inject_trap.vector = -1;
+
+ rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
+ if ( rc != 0 )
+ goto fail4;
+
+ if ( nestedhvm_enabled(d)
+ && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
+ goto fail5;
+
+ dm_domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
+
/* Create ioreq event channel. */
- rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL);
+ rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
if ( rc < 0 )
- goto fail4;
+ goto fail6;
/* Register ioreq event channel. */
v->arch.hvm_vcpu.xen_port = rc;
@@ -1116,9 +1139,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
if ( v->vcpu_id == 0 )
{
/* Create bufioreq event channel. */
- rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL);
+ rc = alloc_unbound_xen_event_channel(v, dm_domid, NULL); /* teardown: none */
if ( rc < 0 )
- goto fail4;
+ goto fail6;
d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = rc;
}
@@ -1127,26 +1150,6 @@ int hvm_vcpu_initialise(struct vcpu *v)
get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
spin_unlock(&d->arch.hvm_domain.ioreq.lock);
- spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
- INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
-
- v->arch.hvm_vcpu.inject_trap.vector = -1;
-
- rc = setup_compat_arg_xlat(v);
- if ( rc != 0 )
- goto fail4;
-
- rc = hvm_vcpu_cacheattr_init(v);
- if ( rc != 0 )
- goto fail5;
-
- softirq_tasklet_init(
- &v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
- (void(*)(unsigned long))hvm_assert_evtchn_irq,
- (unsigned long)v);
-
- v->arch.user_regs.eflags = 2;
-
if ( v->vcpu_id == 0 )
{
/* NB. All these really belong in hvm_domain_initialise(). */
@@ -1164,14 +1167,16 @@ int hvm_vcpu_initialise(struct vcpu *v)
return 0;
+ fail6:
+ nestedhvm_vcpu_destroy(v);
fail5:
free_compat_arg_xlat(v);
fail4:
- nestedhvm_vcpu_destroy(v);
- fail3:
hvm_funcs.vcpu_destroy(v);
- fail2:
+ fail3:
vlapic_destroy(v);
+ fail2:
+ hvm_vcpu_cacheattr_destroy(v);
fail1:
return rc;
}
--
1.7.9.5
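The restructured hvm_domain_initialise() and hvm_vcpu_initialise()
above both follow the usual goto-unwind error-handling idiom: each
resource that initialises successfully gets a label in a ladder that
tears everything down in reverse order, and the /* teardown: ... */
comments pair every step with its cleanup. A minimal standalone sketch
of the pattern (all names invented):

    struct thing;
    int  step_a_init(struct thing *t);      /* teardown: step_a_destroy */
    void step_a_destroy(struct thing *t);
    int  step_b_init(struct thing *t);      /* teardown: step_b_destroy */
    void step_b_destroy(struct thing *t);
    int  step_c_init(struct thing *t);      /* teardown: none */

    int thing_init(struct thing *t)
    {
        int rc;

        if ( (rc = step_a_init(t)) != 0 )
            goto fail1;
        if ( (rc = step_b_init(t)) != 0 )
            goto fail2;
        if ( (rc = step_c_init(t)) != 0 )
            goto fail3;

        return 0;

     fail3:                     /* undo everything before step_c ... */
        step_b_destroy(t);
     fail2:                     /* ... falling through in reverse order */
        step_a_destroy(t);
     fail1:
        return rc;
    }

Keeping the initialisation order and the label order in lock-step is
what lets this patch move steps around without leaking resources on
the error paths.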