From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Stefano Stabellini <stefano.stabellini@citrix.com>,
	Ian Campbell <ian.campbell@citrix.com>,
	Jan Beulich <jbeulich@suse.com>,
	Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH v4 26/31] xen/x86: allow HVM guests to use hypercalls to bring up vCPUs
Date: Fri, 7 Aug 2015 12:18:03 +0200
Message-ID: <1438942688-7610-27-git-send-email-roger.pau@citrix.com>
In-Reply-To: <1438942688-7610-1-git-send-email-roger.pau@citrix.com>

Allow HVM guests to use the VCPUOP_initialise, VCPUOP_up, VCPUOP_down and
VCPUOP_is_up hypercalls.

This patch introduces a new structure (vcpu_hvm_context) that is to be used
in conjunction with the VCPUOP_initialise hypercall in order to initialize
the vCPUs of HVM guests.
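
As an illustration (not part of this patch), below is a rough sketch of how a
guest could use these hypercalls to start an AP in 32-bit flat protected mode.
It assumes the guest environment provides the usual HYPERVISOR_vcpu_op()
hypercall wrapper and the relevant public headers; ap_entry and ap_stack_top
are hypothetical symbols supplied by the guest.

#include <string.h>
#include <xen/vcpu.h>            /* VCPUOP_initialise, VCPUOP_up */
#include <xen/hvm/hvm_vcpu.h>    /* struct vcpu_hvm_context */

extern void ap_entry(void);      /* AP entry point (hypothetical) */
extern char ap_stack_top[];      /* top of the AP stack (hypothetical) */

static int bring_up_ap(int vcpu)
{
    struct vcpu_hvm_context ctx;
    int rc;

    memset(&ctx, 0, sizeof(ctx));
    ctx.mode = VCPU_HVM_MODE_32B;

    /* Flat 32-bit protected mode with paging disabled. */
    ctx.cpu_regs.x86_32.cr0 = 0x1;                      /* CR0.PE */
    ctx.cpu_regs.x86_32.eflags = 0x2;                   /* reserved bit 1 */
    ctx.cpu_regs.x86_32.eip = (uint32_t)(unsigned long)ap_entry;
    ctx.cpu_regs.x86_32.esp = (uint32_t)(unsigned long)ap_stack_top;

    /*
     * Access rights follow the _ar layout described in hvm_vcpu.h:
     * 0xc9b: code, exec/read, accessed, s=1, dpl=0, p=1, db=1, g=1.
     * 0xc93: data, read/write, accessed, s=1, dpl=0, p=1, db=1, g=1.
     * 0x08b: 32-bit busy TSS, present.
     */
    ctx.cpu_regs.x86_32.cs_limit = 0xffffffff;
    ctx.cpu_regs.x86_32.cs_ar = 0xc9b;
    ctx.cpu_regs.x86_32.ds_limit = 0xffffffff;
    ctx.cpu_regs.x86_32.ds_ar = 0xc93;
    ctx.cpu_regs.x86_32.ss_limit = 0xffffffff;
    ctx.cpu_regs.x86_32.ss_ar = 0xc93;
    ctx.cpu_regs.x86_32.tr_limit = 0x67;
    ctx.cpu_regs.x86_32.tr_ar = 0x8b;

    rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctx);
    if ( rc )
        return rc;

    /* The vCPU is left offline after VCPUOP_initialise; bring it up. */
    return HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
}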

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@citrix.com>
---
 xen/arch/arm/domain.c             |  24 ++++++
 xen/arch/x86/domain.c             | 156 +++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c            |   8 ++
 xen/common/domain.c               |  16 +---
 xen/include/public/hvm/hvm_vcpu.h | 168 ++++++++++++++++++++++++++++++++++++++
 xen/include/xen/domain.h          |   2 +
 6 files changed, 359 insertions(+), 15 deletions(-)
 create mode 100644 xen/include/public/hvm/hvm_vcpu.h

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index b2bfc7d..b20035d 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -752,6 +752,30 @@ int arch_set_info_guest(
     return 0;
 }
 
+int arch_initialize_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    struct vcpu_guest_context *ctxt;
+    struct domain *d = current->domain;
+    int rc;
+
+    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
+        return -ENOMEM;
+
+    if ( copy_from_guest(ctxt, arg, 1) )
+    {
+        free_vcpu_guest_context(ctxt);
+        return -EFAULT;
+    }
+
+    domain_lock(d);
+    rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
+    domain_unlock(d);
+
+    free_vcpu_guest_context(ctxt);
+
+    return rc;
+}
+
 int arch_vcpu_reset(struct vcpu *v)
 {
     vcpu_end_shutdown_deferral(v);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 432fe43..4a7f8d9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -37,6 +37,7 @@
 #include <xen/wait.h>
 #include <xen/guest_access.h>
 #include <public/sysctl.h>
+#include <public/hvm/hvm_vcpu.h>
 #include <asm/regs.h>
 #include <asm/mc146818rtc.h>
 #include <asm/system.h>
@@ -1135,6 +1136,161 @@ int arch_set_info_guest(
 #undef c
 }
 
+/* Called by VCPUOP_initialise for HVM guests. */
+static int arch_set_info_hvm_guest(struct vcpu *v, vcpu_hvm_context_t *ctx)
+{
+    struct segment_register seg;
+
+#define get_context_seg(ctx, seg, f)                                        \
+    (ctx)->mode == VCPU_HVM_MODE_16B ? (ctx)->cpu_regs.x86_16.seg##_##f :   \
+    (ctx)->mode == VCPU_HVM_MODE_32B ? (ctx)->cpu_regs.x86_32.seg##_##f :   \
+    (ctx)->cpu_regs.x86_64.seg##_##f
+
+#define get_context_gpr(ctx, gpr)                                           \
+    (ctx)->mode == VCPU_HVM_MODE_16B ? (ctx)->cpu_regs.x86_16.gpr :         \
+    (ctx)->mode == VCPU_HVM_MODE_32B ? (ctx)->cpu_regs.x86_32.e##gpr :      \
+    (ctx)->cpu_regs.x86_64.r##gpr
+
+#define get_context_field(ctx, field)                                       \
+    (ctx)->mode == VCPU_HVM_MODE_16B ? (ctx)->cpu_regs.x86_16.field :       \
+    (ctx)->mode == VCPU_HVM_MODE_32B ? (ctx)->cpu_regs.x86_32.field :       \
+    (ctx)->cpu_regs.x86_64.field
+
+    memset(&seg, 0, sizeof(seg));
+
+    if ( !paging_mode_hap(v->domain) )
+        v->arch.guest_table = pagetable_null();
+
+    v->arch.user_regs.rax = get_context_gpr(ctx, ax);
+    v->arch.user_regs.rcx = get_context_gpr(ctx, cx);
+    v->arch.user_regs.rdx = get_context_gpr(ctx, dx);
+    v->arch.user_regs.rbx = get_context_gpr(ctx, bx);
+    v->arch.user_regs.rsp = get_context_gpr(ctx, sp);
+    v->arch.user_regs.rbp = get_context_gpr(ctx, bp);
+    v->arch.user_regs.rsi = get_context_gpr(ctx, si);
+    v->arch.user_regs.rdi = get_context_gpr(ctx, di);
+    v->arch.user_regs.rip = get_context_gpr(ctx, ip);
+    v->arch.user_regs.rflags = get_context_gpr(ctx, flags);
+
+    v->arch.hvm_vcpu.guest_cr[0] = get_context_field(ctx, cr0) | X86_CR0_ET;
+    hvm_update_guest_cr(v, 0);
+    v->arch.hvm_vcpu.guest_cr[4] = get_context_field(ctx, cr4);
+    hvm_update_guest_cr(v, 4);
+
+    switch ( ctx->mode )
+    {
+    case VCPU_HVM_MODE_32B:
+        v->arch.hvm_vcpu.guest_efer = ctx->cpu_regs.x86_32.efer;
+        hvm_update_guest_efer(v);
+        v->arch.hvm_vcpu.guest_cr[3] = ctx->cpu_regs.x86_32.cr3;
+        hvm_update_guest_cr(v, 3);
+        break;
+    case VCPU_HVM_MODE_64B:
+        v->arch.user_regs.r8 = ctx->cpu_regs.x86_64.r8;
+        v->arch.user_regs.r9 = ctx->cpu_regs.x86_64.r9;
+        v->arch.user_regs.r10 = ctx->cpu_regs.x86_64.r10;
+        v->arch.user_regs.r11 = ctx->cpu_regs.x86_64.r11;
+        v->arch.user_regs.r12 = ctx->cpu_regs.x86_64.r12;
+        v->arch.user_regs.r13 = ctx->cpu_regs.x86_64.r13;
+        v->arch.user_regs.r14 = ctx->cpu_regs.x86_64.r14;
+        v->arch.user_regs.r15 = ctx->cpu_regs.x86_64.r15;
+        v->arch.hvm_vcpu.guest_efer = ctx->cpu_regs.x86_64.efer;
+        hvm_update_guest_efer(v);
+        v->arch.hvm_vcpu.guest_cr[3] = ctx->cpu_regs.x86_64.cr3;
+        hvm_update_guest_cr(v, 3);
+        break;
+    default:
+        break;
+    }
+
+    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
+    {
+        /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
+        struct page_info *page = get_page_from_gfn(v->domain,
+                                 v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
+                                 NULL, P2M_ALLOC);
+        if ( !page )
+        {
+            gdprintk(XENLOG_ERR, "Invalid CR3\n");
+            domain_crash(v->domain);
+            return -EINVAL;
+        }
+
+        v->arch.guest_table = pagetable_from_page(page);
+    }
+
+    seg.base = get_context_seg(ctx, cs, base);
+    seg.limit = get_context_seg(ctx, cs, limit);
+    seg.attr.bytes = get_context_seg(ctx, cs, ar);
+    hvm_set_segment_register(v, x86_seg_cs, &seg);
+    seg.base = get_context_seg(ctx, ds, base);
+    seg.limit = get_context_seg(ctx, ds, limit);
+    seg.attr.bytes = get_context_seg(ctx, ds, ar);
+    hvm_set_segment_register(v, x86_seg_ds, &seg);
+    seg.base = get_context_seg(ctx, ss, base);
+    seg.limit = get_context_seg(ctx, ss, limit);
+    seg.attr.bytes = get_context_seg(ctx, ss, ar);
+    hvm_set_segment_register(v, x86_seg_ss, &seg);
+    seg.base = get_context_seg(ctx, tr, base);
+    seg.limit = get_context_seg(ctx, tr, limit);
+    seg.attr.bytes = get_context_seg(ctx, tr, ar);
+    hvm_set_segment_register(v, x86_seg_tr, &seg);
+
+    /* Sync AP's TSC with BSP's. */
+    v->arch.hvm_vcpu.cache_tsc_offset =
+        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+                             v->domain->arch.hvm_domain.sync_tsc);
+
+    v->arch.hvm_vcpu.msr_tsc_adjust = 0;
+
+    paging_update_paging_modes(v);
+
+    v->arch.flags |= TF_kernel_mode;
+    v->is_initialised = 1;
+    set_bit(_VPF_down, &v->pause_flags);
+
+    return 0;
+#undef get_context_field
+#undef get_context_gpr
+#undef get_context_seg
+}
+
+int arch_initialize_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    struct vcpu_guest_context *ctxt;
+    struct vcpu_hvm_context hvm_ctx;
+    struct domain *d = current->domain;
+    int rc;
+
+    if ( is_hvm_vcpu(v) )
+    {
+        if ( copy_from_guest(&hvm_ctx, arg, 1) )
+            return -EFAULT;
+
+        domain_lock(d);
+        rc = v->is_initialised ? -EEXIST : arch_set_info_hvm_guest(v, &hvm_ctx);
+        domain_unlock(d);
+    } else {
+        if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
+            return -ENOMEM;
+
+        if ( copy_from_guest(ctxt, arg, 1) )
+        {
+            free_vcpu_guest_context(ctxt);
+            return -EFAULT;
+        }
+
+        domain_lock(d);
+        rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
+        domain_unlock(d);
+
+        free_vcpu_guest_context(ctxt);
+    }
+
+    return rc;
+}
+
 int arch_vcpu_reset(struct vcpu *v)
 {
     if ( is_pv_vcpu(v) )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c778a20..15ea5e2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4972,6 +4972,10 @@ static long hvm_vcpu_op(
     case VCPUOP_stop_singleshot_timer:
     case VCPUOP_register_vcpu_info:
     case VCPUOP_register_vcpu_time_memory_area:
+    case VCPUOP_initialise:
+    case VCPUOP_up:
+    case VCPUOP_down:
+    case VCPUOP_is_up:
         rc = do_vcpu_op(cmd, vcpuid, arg);
         break;
     default:
@@ -5030,6 +5034,10 @@ static long hvm_vcpu_op_compat32(
     case VCPUOP_stop_singleshot_timer:
     case VCPUOP_register_vcpu_info:
     case VCPUOP_register_vcpu_time_memory_area:
+    case VCPUOP_initialise:
+    case VCPUOP_up:
+    case VCPUOP_down:
+    case VCPUOP_is_up:
         rc = compat_vcpu_op(cmd, vcpuid, arg);
         break;
     default:
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 1b9fcfc..f97e7f4 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1173,7 +1173,6 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct domain *d = current->domain;
     struct vcpu *v;
-    struct vcpu_guest_context *ctxt;
     long rc = 0;
 
     if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
@@ -1185,20 +1184,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( v->vcpu_info == &dummy_vcpu_info )
             return -EINVAL;
 
-        if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
-            return -ENOMEM;
-
-        if ( copy_from_guest(ctxt, arg, 1) )
-        {
-            free_vcpu_guest_context(ctxt);
-            return -EFAULT;
-        }
-
-        domain_lock(d);
-        rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
-        domain_unlock(d);
-
-        free_vcpu_guest_context(ctxt);
+        rc = arch_initialize_vcpu(v, arg);
 
         if ( rc == -ERESTART )
             rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh",
diff --git a/xen/include/public/hvm/hvm_vcpu.h b/xen/include/public/hvm/hvm_vcpu.h
new file mode 100644
index 0000000..db86edd
--- /dev/null
+++ b/xen/include/public/hvm/hvm_vcpu.h
@@ -0,0 +1,168 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_HVM_VCPU_H__
+#define __XEN_PUBLIC_HVM_HVM_VCPU_H__
+
+#include "../xen.h"
+
+struct vcpu_hvm_x86_16 {
+    uint16_t ax;
+    uint16_t cx;
+    uint16_t dx;
+    uint16_t bx;
+    uint16_t sp;
+    uint16_t bp;
+    uint16_t si;
+    uint16_t di;
+    uint16_t ip;
+    uint16_t flags;
+
+    uint32_t cr0;
+    uint32_t cr4;
+
+    uint32_t cs_base;
+    uint32_t ds_base;
+    uint32_t ss_base;
+    uint32_t tr_base;
+    uint32_t cs_limit;
+    uint32_t ds_limit;
+    uint32_t ss_limit;
+    uint32_t tr_limit;
+    uint16_t cs_ar;
+    uint16_t ds_ar;
+    uint16_t ss_ar;
+    uint16_t tr_ar;
+};
+
+struct vcpu_hvm_x86_32 {
+    uint32_t eax;
+    uint32_t ecx;
+    uint32_t edx;
+    uint32_t ebx;
+    uint32_t esp;
+    uint32_t ebp;
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t eip;
+    uint32_t eflags;
+
+    uint32_t cr0;
+    uint32_t cr3;
+    uint32_t cr4;
+    uint64_t efer;
+
+    uint32_t cs_base;
+    uint32_t ds_base;
+    uint32_t ss_base;
+    uint32_t tr_base;
+    uint32_t cs_limit;
+    uint32_t ds_limit;
+    uint32_t ss_limit;
+    uint32_t tr_limit;
+    uint16_t cs_ar;
+    uint16_t ds_ar;
+    uint16_t ss_ar;
+    uint16_t tr_ar;
+};
+
+struct vcpu_hvm_x86_64 {
+    uint64_t rax;
+    uint64_t rcx;
+    uint64_t rdx;
+    uint64_t rbx;
+    uint64_t rsp;
+    uint64_t rbp;
+    uint64_t rsi;
+    uint64_t rdi;
+    uint64_t r8;
+    uint64_t r9;
+    uint64_t r10;
+    uint64_t r11;
+    uint64_t r12;
+    uint64_t r13;
+    uint64_t r14;
+    uint64_t r15;
+    uint64_t rip;
+    uint64_t rflags;
+
+    uint64_t cr0;
+    uint64_t cr3;
+    uint64_t cr4;
+    uint64_t efer;
+
+    uint32_t cs_base;
+    uint32_t ds_base;
+    uint32_t ss_base;
+    uint32_t tr_base;
+    uint32_t cs_limit;
+    uint32_t ds_limit;
+    uint32_t ss_limit;
+    uint32_t tr_limit;
+    uint16_t cs_ar;
+    uint16_t ds_ar;
+    uint16_t ss_ar;
+    uint16_t tr_ar;
+};
+
+/*
+ * The layout of the _ar fields of the segment registers is the
+ * following:
+ *
+ * Bits [0,3]: type (bits 40-43).
+ * Bit      4: s    (descriptor type, bit 44).
+ * Bits [5,6]: dpl  (descriptor privilege level, bits 45-46).
+ * Bit      7: p    (segment-present, bit 47).
+ * Bit      8: avl  (available for system software, bit 52).
+ * Bit      9: l    (64-bit code segment, bit 53).
+ * Bit     10: db   (meaning depends on the segment, bit 54).
+ * Bit     11: g    (granularity, bit 55).
+ *
+ * A more complete description of the meaning of these fields can be
+ * obtained from the Intel SDM, Volume 3, section 3.4.5.
+ */
+
+struct vcpu_hvm_context {
+#define VCPU_HVM_MODE_16B 0  /* 16bit fields of the structure will be used. */
+#define VCPU_HVM_MODE_32B 1  /* 32bit fields of the structure will be used. */
+#define VCPU_HVM_MODE_64B 2  /* 64bit fields of the structure will be used. */
+    uint32_t mode;
+
+    /* CPU registers. */
+    union {
+        struct vcpu_hvm_x86_16 x86_16;
+        struct vcpu_hvm_x86_32 x86_32;
+        struct vcpu_hvm_x86_64 x86_64;
+    } cpu_regs;
+};
+typedef struct vcpu_hvm_context vcpu_hvm_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_hvm_context_t);
+
+#endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 848db8a..21690be 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -68,6 +68,8 @@ void arch_domain_unpause(struct domain *d);
 int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);
 void arch_get_info_guest(struct vcpu *, vcpu_guest_context_u);
 
+int arch_initialize_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
+
 int domain_relinquish_resources(struct domain *d);
 
 void dump_pageframe_info(struct domain *d);
-- 
1.9.5 (Apple Git-50.3)

