From: George Dunlap <george.dunlap@eu.citrix.com>
To: xen-devel@lists.xen.org
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
Keir Fraser <keir@xen.org>, Tim Deegan <tim@xen.org>,
Jan Beulich <jbeulich@suse.com>
Subject: [PATCH RFC v13 05/20] pvh: Disable unneeded features of HVM containers
Date: Mon, 23 Sep 2013 17:49:45 +0100
Message-ID: <1379955000-11050-6-git-send-email-george.dunlap@eu.citrix.com>
In-Reply-To: <1379955000-11050-1-git-send-email-george.dunlap@eu.citrix.com>

Things kept:
* cacheattr_region lists
* irq-related structures
* paging
* tm_list
* hvm params

Things disabled for now:
* compat xlation

Things disabled:
* Emulated timers and clock sources
* IO/MMIO emulation
* msix tables
* hvm_funcs
* nested HVM
* Fast-path for emulated lapic accesses

Getting rid of the hvm_params struct required a couple of other places to
check for its existence before attempting to read the params.
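
For illustration, such a check has roughly the following shape; the helper
name here is hypothetical, and only the d->arch.hvm_domain.params field is
taken from this series:

    /* Hypothetical helper: read an HVM param only when the array exists. */
    static uint64_t hvm_param_or_zero(const struct domain *d, unsigned int idx)
    {
        if ( !has_hvm_container_domain(d) || !d->arch.hvm_domain.params )
            return 0;
        return d->arch.hvm_domain.params[idx];
    }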
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
v13:
- Remove an unnecessary comment
- Allocate params for HVM domains as well; remove the null checks that the
  previous version of this patch required
- Add ASSERT(!is_pvh) to handle_pio
CC: Jan Beulich <jbeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: Keir Fraser <keir@xen.org>
---
 xen/arch/x86/hvm/hvm.c      | 43 ++++++++++++++++++++++++++++++++++++++-----
 xen/arch/x86/hvm/io.c       |  7 +++++++
 xen/arch/x86/hvm/irq.c      |  3 +++
 xen/arch/x86/hvm/vmx/intr.c |  3 ++-
 4 files changed, 50 insertions(+), 6 deletions(-)
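
For readers without patch 03/20 ("Introduce pv guest type and
has_hvm_container macros") at hand, the guest-type predicates used throughout
the hunks below have roughly this shape; this is a sketch of that patch's
definitions, not a verbatim quote:

    /* Three guest types; PVH shares the HVM container with full HVM. */
    enum guest_type { guest_type_pv, guest_type_pvh, guest_type_hvm };

    #define is_pv_domain(d)   ((d)->guest_type == guest_type_pv)
    #define is_pvh_domain(d)  ((d)->guest_type == guest_type_pvh)
    #define is_hvm_domain(d)  ((d)->guest_type == guest_type_hvm)
    #define is_pvh_vcpu(v)    (is_pvh_domain((v)->domain))
    #define is_hvm_vcpu(v)    (is_hvm_domain((v)->domain))
    #define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)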

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index aaf956a..370bd4d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -301,6 +301,10 @@ u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
 
 void hvm_migrate_timers(struct vcpu *v)
 {
+    /* PVH doesn't use rtc and emulated timers, it uses pvclock mechanism. */
+    if ( is_pvh_vcpu(v) )
+        return;
+
     rtc_migrate_timers(v);
     pt_migrate(v);
 }
@@ -342,10 +346,13 @@ void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
 
-    pt_restore_timer(v);
-
     check_wakeup_from_wait();
 
+    if ( is_pvh_vcpu(v) )
+        goto check_inject_trap;
+
+    pt_restore_timer(v);
+
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     p = get_ioreq(v);
     while ( p->state != STATE_IOREQ_NONE )
@@ -368,6 +375,7 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+ check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
@@ -528,10 +536,16 @@ int hvm_domain_initialise(struct domain *d)
     if ( rc != 0 )
         goto fail0;
 
+    rc = -ENOMEM;
     d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
+    if ( !d->arch.hvm_domain.params )
+        goto fail1;
+
+    if ( is_pvh_domain(d) )
+        return 0;
+
     d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
-    rc = -ENOMEM;
-    if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
+    if ( !d->arch.hvm_domain.io_handler )
         goto fail1;
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
@@ -578,6 +592,9 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    if ( is_pvh_domain(d) )
+        return;
+
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
@@ -602,6 +619,10 @@ void hvm_domain_relinquish_resources(struct domain *d)
 void hvm_domain_destroy(struct domain *d)
 {
     hvm_destroy_cacheattr_region_list(d);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     hvm_funcs.domain_destroy(d);
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1114,6 +1135,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     v->arch.hvm_vcpu.inject_trap.vector = -1;
 
+    if ( is_pvh_vcpu(v) )
+    {
+        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
+        /* This for hvm_long_mode_enabled(v). */
+        v->arch.hvm_vcpu.guest_efer = EFER_SCE | EFER_LMA | EFER_LME;
+        return 0;
+    }
+
     rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
     if ( rc != 0 )
         goto fail3;
@@ -1188,7 +1217,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
 
     tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
     hvm_vcpu_cacheattr_destroy(v);
-    vlapic_destroy(v);
+
+    if ( is_hvm_vcpu(v) )
+        vlapic_destroy(v);
+
     hvm_funcs.vcpu_destroy(v);
 
     /* Event channel is already freed by evtchn_destroy(). */
@@ -1389,6 +1421,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
      * a fast path for LAPIC accesses, skipping the p2m lookup. */
     if ( !nestedhvm_vcpu_in_guestmode(v)
+         && is_hvm_vcpu(v)
          && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
     {
         if ( !handle_mmio() )
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 4ae2c0c..6edc5c9 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -175,6 +175,10 @@ int handle_mmio(void)
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     int rc;
 
+    /* No MMIO for PVH vcpus */
+    if ( is_pvh_vcpu(curr) )
+        return 0;
+
     hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
 
     rc = hvm_emulate_one(&ctxt);
@@ -228,6 +232,9 @@ int handle_pio(uint16_t port, int size, int dir)
     unsigned long data, reps = 1;
     int rc;
 
+    /* PIO for PVH is handled by the PV handlers */
+    ASSERT(!is_pvh_vcpu(curr));
+
     if ( dir == IOREQ_WRITE )
         data = guest_cpu_user_regs()->eax;
 
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 6a6fb68..677fbcd 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -405,6 +405,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
          && vcpu_info(v, evtchn_upcall_pending) )
         return hvm_intack_vector(plat->irq.callback_via.vector);
 
+    if ( is_pvh_vcpu(v) )
+        return hvm_intack_none;
+
     if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
         return hvm_intack_pic(0);
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index e6d5b46..482413e 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -225,7 +225,8 @@ void vmx_intr_assist(void)
     }
 
     /* Crank the handle on interrupt state. */
-    pt_vector = pt_update_irq(v);
+    if ( is_hvm_vcpu(v) )
+        pt_vector = pt_update_irq(v);
 
     do {
         intack = hvm_vcpu_has_pending_irq(v);
--
1.7.9.5