From: George Dunlap <george.dunlap@eu.citrix.com>
To: xen-devel@lists.xen.org
Cc: Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Eddie Dong <eddie.dong@intel.com>, Tim Deegan <tim@xen.org>,
	Jun Nakajima <jun.nakajima@intel.com>
Subject: [PATCH v15 07/19] pvh: Disable unneeded features of HVM containers
Date: Mon, 11 Nov 2013 14:57:09 +0000
Message-ID: <1384181841-22739-8-git-send-email-george.dunlap@eu.citrix.com>
In-Reply-To: <1384181841-22739-1-git-send-email-george.dunlap@eu.citrix.com>

Things kept:
* cacheattr_region lists
* irq-related structures
* paging
* tm_list
* hvm params
* hvm_domain.io_handler (for handling PV io)

Things disabled for now:
* compat xlation

Things disabled:
* Emulated timers and clock sources
* IO/MMIO ioreq pages, event channels
* msix tables
* hvm_funcs
* nested HVM
* Fast-path for emulated lapic accesses
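
For reference, the guards added below rely on the guest-type predicates
introduced in patch 05 ("pvh prep: Introduce pv guest type and
has_hvm_container macros").  A rough sketch of how those predicates are
assumed to relate (names follow that patch; the exact definitions in
xen/include/xen/sched.h may differ in detail):

    /* A PVH domain lives in an HVM container but is neither plain PV
     * nor full HVM. */
    enum guest_type { guest_type_pv, guest_type_pvh, guest_type_hvm };

    #define is_pv_domain(d)   ((d)->guest_type == guest_type_pv)
    #define is_pvh_domain(d)  ((d)->guest_type == guest_type_pvh)
    #define is_hvm_domain(d)  ((d)->guest_type == guest_type_hvm)
    #define has_hvm_container_domain(d) \
        ((d)->guest_type != guest_type_pv)

    #define is_pvh_vcpu(v)    is_pvh_domain((v)->domain)
    #define is_hvm_vcpu(v)    is_hvm_domain((v)->domain)

Every hunk below follows the same pattern: an HVM emulation path either
returns early for PVH, or is entered only for full HVM guests.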

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
v15:
 - Fix typo in comment
 - Allocate io_handler (used later when using HVM io path for PV io)
 - Port the hvm_do_resume change onto the "hvm no ioreq" patch
v14:
 - Also free the params struct for pvh domains, since we've allocated it
 - Fail io for pvh VMs further down the stack, as we will be using the emulation
   code before calling into the pv pio handlers
v13:
 - Removed unnecessary comment
 - Allocate params for pvh domains; remove the null checks that were
   necessary in the last patch
 - Add ASSERT(!is_pvh) to handle_pio
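
To illustrate the v13 note, a sketch of the handle_pio() assertion
(xen/arch/x86/hvm/io.c); the function body here is paraphrased, not
quoted from the patch:

    int handle_pio(uint16_t port, int size, int dir)
    {
        struct vcpu *curr = current;

        /* PVH port io is failed further down the stack (see the v14
         * note), so it must never reach this emulated-PIO path. */
        ASSERT(!is_pvh_vcpu(curr));

        /* ... existing emulated-PIO handling elided ... */
    }
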
CC: Jan Beulich <jbeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: Keir Fraser <keir@xen.org>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Eddie Dong <eddie.dong@intel.com>
---
 xen/arch/x86/hvm/hvm.c      |   43 ++++++++++++++++++++++++++++++++++++-------
 xen/arch/x86/hvm/irq.c      |    3 +++
 xen/arch/x86/hvm/vmx/intr.c |    3 ++-
 3 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0a9c922..10ddc34 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -304,6 +304,10 @@ u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
 
 void hvm_migrate_timers(struct vcpu *v)
 {
+    /* PVH uses the pvclock mechanism, not the RTC or emulated timers. */
+    if ( is_pvh_vcpu(v) )
+        return;
+
     rtc_migrate_timers(v);
     pt_migrate(v);
 }
@@ -345,10 +349,11 @@ void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
 
-    pt_restore_timer(v);
-
     check_wakeup_from_wait();
 
+    if ( is_hvm_vcpu(v) )
+        pt_restore_timer(v);
+
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     if ( !(p = get_ioreq(v)) )
         goto check_inject_trap;
@@ -560,6 +565,9 @@ int hvm_domain_initialise(struct domain *d)
         goto fail1;
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
+    if ( is_pvh_domain(d) )
+        return 0;
+
     hvm_init_guest_time(d);
 
     d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
@@ -600,6 +608,12 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
@@ -616,14 +630,15 @@ void hvm_domain_relinquish_resources(struct domain *d)
         pmtimer_deinit(d);
         hpet_deinit(d);
     }
-
-    xfree(d->arch.hvm_domain.io_handler);
-    xfree(d->arch.hvm_domain.params);
 }
 
 void hvm_domain_destroy(struct domain *d)
 {
     hvm_destroy_cacheattr_region_list(d);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     hvm_funcs.domain_destroy(d);
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1125,7 +1140,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
         goto fail1;
 
     /* NB: vlapic_init must be called before hvm_funcs.vcpu_initialise */
-    if ( (rc = vlapic_init(v)) != 0 ) /* teardown: vlapic_destroy */
+    if ( is_hvm_vcpu(v) )
+        rc = vlapic_init(v);
+    if ( rc != 0 ) /* teardown: vlapic_destroy */
         goto fail2;
 
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
@@ -1140,6 +1157,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     v->arch.hvm_vcpu.inject_trap.vector = -1;
 
+    if ( is_pvh_vcpu(v) )
+    {
+        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
+        /* This is for hvm_long_mode_enabled(v). */
+        v->arch.hvm_vcpu.guest_efer = EFER_SCE | EFER_LMA | EFER_LME;
+        return 0;
+    }
+
     rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
     if ( rc != 0 )
         goto fail4;
@@ -1211,7 +1236,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
 
     tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
     hvm_vcpu_cacheattr_destroy(v);
-    vlapic_destroy(v);
+
+    if ( is_hvm_vcpu(v) )
+        vlapic_destroy(v);
+
     hvm_funcs.vcpu_destroy(v);
 
     /* Event channel is already freed by evtchn_destroy(). */
@@ -1414,6 +1442,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
      * a fast path for LAPIC accesses, skipping the p2m lookup. */
     if ( !nestedhvm_vcpu_in_guestmode(v)
+         && is_hvm_vcpu(v)
          && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
     {
         if ( !handle_mmio() )
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 6a6fb68..677fbcd 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -405,6 +405,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
          && vcpu_info(v, evtchn_upcall_pending) )
         return hvm_intack_vector(plat->irq.callback_via.vector);
 
+    if ( is_pvh_vcpu(v) )
+        return hvm_intack_none;
+
     if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
         return hvm_intack_pic(0);
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 1942e31..7757910 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -236,7 +236,8 @@ void vmx_intr_assist(void)
     }
 
     /* Crank the handle on interrupt state. */
-    pt_vector = pt_update_irq(v);
+    if ( is_hvm_vcpu(v) )
+        pt_vector = pt_update_irq(v);
 
     do {
         unsigned long intr_info;
-- 
1.7.9.5
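
One subtlety in the hvm_vcpu_initialise() hunk: with vlapic_init()
split out of the condition, the following "if ( rc != 0 )" relies on rc
already being zero when the is_hvm_vcpu() branch is skipped.  That
holds because an earlier allocation in the function (the one whose
failure takes the "goto fail1" visible in the hunk context) sets rc on
its success path.  A paraphrased sketch; the preceding call is assumed
from context, not quoted from the patch:

    int rc;

    rc = hvm_vcpu_cacheattr_init(v);  /* leaves rc == 0 on success */
    if ( rc != 0 )
        goto fail1;

    /* NB: vlapic_init must be called before hvm_funcs.vcpu_initialise */
    if ( is_hvm_vcpu(v) )             /* PVH vcpus have no emulated lapic */
        rc = vlapic_init(v);
    if ( rc != 0 )                    /* for PVH, rc is still 0 here */
        goto fail2;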
