From: Wei Liu <wei.liu2@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Wei Liu <wei.liu2@citrix.com>, Jan Beulich <JBeulich@suse.com>
Subject: [PATCH for-next 8/8] x86/domain: move HVM specific code to hvm/domain.c
Date: Mon, 10 Apr 2017 14:27:16 +0100
Message-ID: <20170410132716.31610-9-wei.liu2@citrix.com>
In-Reply-To: <20170410132716.31610-1-wei.liu2@citrix.com>

Only one function, arch_set_info_hvm_guest, is moved. The check_segment
helper is moved along with it, since arch_set_info_hvm_guest is its only
user.

No functional change.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
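Note for reviewers, not part of the patch: the 64-bit branch of
arch_set_info_hvm_guest builds its boot segments from the attribute
words 0xa9b, 0xc93 and 0x8b. Below is a minimal standalone sketch that
decodes them, assuming the attribute bit layout implied by
check_segment() and struct segment_register (type[3:0], S[4], DPL[6:5],
P[7], AVL[8], L[9], D/B[10], G[11], pad[15:12]); decode_attr() is an
illustrative helper, not Xen code.

/*
 * Standalone illustration only: decode the VMX-style segment attribute
 * words used for the 64-bit boot segments in this patch, assuming the
 * bit layout described above.
 */
#include <stdio.h>

static void decode_attr(const char *name, unsigned int attr)
{
    printf("%-8s attr=%#05x type=%#x S=%u DPL=%u P=%u AVL=%u L=%u D/B=%u G=%u pad=%#x\n",
           name, attr,
           attr & 0xf,          /* segment type (accessed/busy, code vs. data) */
           (attr >> 4) & 1,     /* S: 1 = code/data, 0 = system segment */
           (attr >> 5) & 3,     /* DPL */
           (attr >> 7) & 1,     /* P: present */
           (attr >> 8) & 1,     /* AVL */
           (attr >> 9) & 1,     /* L: 64-bit code segment */
           (attr >> 10) & 1,    /* D/B */
           (attr >> 11) & 1,    /* G: 4K granularity */
           (attr >> 12) & 0xf); /* pad, must be zero per check_segment() */
}

int main(void)
{
    decode_attr("cs", 0xa9b);       /* 64-bit code segment */
    decode_attr("ds/ss/es", 0xc93); /* flat read/write data segments */
    decode_attr("tr", 0x8b);        /* busy 64-bit TSS */
    return 0;
}

Built with any C compiler, this should print fields matching the
comments in the moved code: a 64-bit code segment, flat data segments
and a busy 64-bit TSS, all with pad = 0.
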
xen/arch/x86/domain.c | 291 -----------------------------------------
xen/arch/x86/hvm/Makefile | 1 +
xen/arch/x86/hvm/domain.c | 322 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 323 insertions(+), 291 deletions(-)
create mode 100644 xen/arch/x86/hvm/domain.c
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4a2363fc96..80a86e1ba2 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1060,297 +1060,6 @@ int arch_set_info_guest(
#undef c
}
-static inline int check_segment(struct segment_register *reg,
- enum x86_segment seg)
-{
-
- if ( reg->attr.fields.pad != 0 )
- {
- gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n");
- return -EINVAL;
- }
-
- if ( reg->attr.bytes == 0 )
- {
- if ( seg != x86_seg_ds && seg != x86_seg_es )
- {
- gprintk(XENLOG_ERR, "Null selector provided for CS, SS or TR\n");
- return -EINVAL;
- }
- return 0;
- }
-
- if ( seg == x86_seg_tr )
- {
- if ( reg->attr.fields.s )
- {
- gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
- return -EINVAL;
- }
-
- if ( reg->attr.fields.type != SYS_DESC_tss_busy )
- {
- gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
- return -EINVAL;
- }
- }
- else if ( !reg->attr.fields.s )
- {
- gprintk(XENLOG_ERR,
- "System segment provided for a code or data segment\n");
- return -EINVAL;
- }
-
- if ( !reg->attr.fields.p )
- {
- gprintk(XENLOG_ERR, "Non-present segment provided\n");
- return -EINVAL;
- }
-
- if ( seg == x86_seg_cs && !(reg->attr.fields.type & 0x8) )
- {
- gprintk(XENLOG_ERR, "Non-code segment provided for CS\n");
- return -EINVAL;
- }
-
- if ( seg == x86_seg_ss &&
- ((reg->attr.fields.type & 0x8) || !(reg->attr.fields.type & 0x2)) )
- {
- gprintk(XENLOG_ERR, "Non-writeable segment provided for SS\n");
- return -EINVAL;
- }
-
- if ( reg->attr.fields.s && seg != x86_seg_ss && seg != x86_seg_cs &&
- (reg->attr.fields.type & 0x8) && !(reg->attr.fields.type & 0x2) )
- {
- gprintk(XENLOG_ERR, "Non-readable segment provided for DS or ES\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Called by VCPUOP_initialise for HVM guests. */
-int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
-{
- struct cpu_user_regs *uregs = &v->arch.user_regs;
- struct segment_register cs, ds, ss, es, tr;
- const char *errstr;
- int rc;
-
- if ( ctx->pad != 0 )
- return -EINVAL;
-
- switch ( ctx->mode )
- {
- default:
- return -EINVAL;
-
- case VCPU_HVM_MODE_32B:
- {
- const struct vcpu_hvm_x86_32 *regs = &ctx->cpu_regs.x86_32;
- uint32_t limit;
-
- if ( ctx->cpu_regs.x86_32.pad1 != 0 ||
- ctx->cpu_regs.x86_32.pad2[0] != 0 ||
- ctx->cpu_regs.x86_32.pad2[1] != 0 ||
- ctx->cpu_regs.x86_32.pad2[2] != 0 )
- return -EINVAL;
-
-#define SEG(s, r) ({ \
- s = (struct segment_register){ .base = (r)->s ## _base, \
- .limit = (r)->s ## _limit, \
- .attr.bytes = (r)->s ## _ar }; \
- /* Set accessed / busy bit for present segments. */ \
- if ( s.attr.fields.p ) \
- s.attr.fields.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2); \
- check_segment(&s, x86_seg_ ## s); })
-
- rc = SEG(cs, regs);
- rc |= SEG(ds, regs);
- rc |= SEG(ss, regs);
- rc |= SEG(es, regs);
- rc |= SEG(tr, regs);
-#undef SEG
-
- if ( rc != 0 )
- return rc;
-
- /* Basic sanity checks. */
- limit = cs.limit;
- if ( cs.attr.fields.g )
- limit = (limit << 12) | 0xfff;
- if ( regs->eip > limit )
- {
- gprintk(XENLOG_ERR, "EIP (%#08x) outside CS limit (%#08x)\n",
- regs->eip, limit);
- return -EINVAL;
- }
-
- if ( ss.attr.fields.dpl != cs.attr.fields.dpl )
- {
- gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
- ss.attr.fields.dpl, cs.attr.fields.dpl);
- return -EINVAL;
- }
-
- if ( ds.attr.fields.p && ds.attr.fields.dpl > cs.attr.fields.dpl )
- {
- gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
- ds.attr.fields.dpl, cs.attr.fields.dpl);
- return -EINVAL;
- }
-
- if ( es.attr.fields.p && es.attr.fields.dpl > cs.attr.fields.dpl )
- {
- gprintk(XENLOG_ERR, "ES.DPL (%u) is greater than CS.DPL (%u)\n",
- es.attr.fields.dpl, cs.attr.fields.dpl);
- return -EINVAL;
- }
-
- if ( (regs->efer & EFER_LMA) && !(regs->efer & EFER_LME) )
- {
- gprintk(XENLOG_ERR, "EFER.LMA set without EFER.LME (%#016lx)\n",
- regs->efer);
- return -EINVAL;
- }
-
- uregs->rax = regs->eax;
- uregs->rcx = regs->ecx;
- uregs->rdx = regs->edx;
- uregs->rbx = regs->ebx;
- uregs->rsp = regs->esp;
- uregs->rbp = regs->ebp;
- uregs->rsi = regs->esi;
- uregs->rdi = regs->edi;
- uregs->rip = regs->eip;
- uregs->rflags = regs->eflags;
-
- v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
- v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
- v->arch.hvm_vcpu.guest_efer = regs->efer;
- }
- break;
-
- case VCPU_HVM_MODE_64B:
- {
- const struct vcpu_hvm_x86_64 *regs = &ctx->cpu_regs.x86_64;
-
- /* Basic sanity checks. */
- if ( !is_canonical_address(regs->rip) )
- {
- gprintk(XENLOG_ERR, "RIP contains a non-canonical address (%#lx)\n",
- regs->rip);
- return -EINVAL;
- }
-
- if ( !(regs->cr0 & X86_CR0_PG) )
- {
- gprintk(XENLOG_ERR, "CR0 doesn't have paging enabled (%#016lx)\n",
- regs->cr0);
- return -EINVAL;
- }
-
- if ( !(regs->cr4 & X86_CR4_PAE) )
- {
- gprintk(XENLOG_ERR, "CR4 doesn't have PAE enabled (%#016lx)\n",
- regs->cr4);
- return -EINVAL;
- }
-
- if ( !(regs->efer & EFER_LME) )
- {
- gprintk(XENLOG_ERR, "EFER doesn't have LME enabled (%#016lx)\n",
- regs->efer);
- return -EINVAL;
- }
-
- uregs->rax = regs->rax;
- uregs->rcx = regs->rcx;
- uregs->rdx = regs->rdx;
- uregs->rbx = regs->rbx;
- uregs->rsp = regs->rsp;
- uregs->rbp = regs->rbp;
- uregs->rsi = regs->rsi;
- uregs->rdi = regs->rdi;
- uregs->rip = regs->rip;
- uregs->rflags = regs->rflags;
-
- v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
- v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
- v->arch.hvm_vcpu.guest_efer = regs->efer;
-
-#define SEG(l, a) (struct segment_register){ .limit = (l), .attr.bytes = (a) }
- cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
- ds = ss = es = SEG(~0u, 0xc93);
- tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */
-#undef SEG
- }
- break;
-
- }
-
- if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
-
- if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(v, 0) )
- {
- gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
- v->arch.hvm_vcpu.guest_cr[4]);
- return -EINVAL;
- }
-
- errstr = hvm_efer_valid(v, v->arch.hvm_vcpu.guest_efer, -1);
- if ( errstr )
- {
- gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
- v->arch.hvm_vcpu.guest_efer, errstr);
- return -EINVAL;
- }
-
- hvm_update_guest_cr(v, 0);
- hvm_update_guest_cr(v, 3);
- hvm_update_guest_cr(v, 4);
- hvm_update_guest_efer(v);
-
- if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
- {
- /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
- struct page_info *page = get_page_from_gfn(v->domain,
- v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
- NULL, P2M_ALLOC);
- if ( !page )
- {
- gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
- v->arch.hvm_vcpu.guest_cr[3]);
- return -EINVAL;
- }
-
- v->arch.guest_table = pagetable_from_page(page);
- }
-
- hvm_set_segment_register(v, x86_seg_cs, &cs);
- hvm_set_segment_register(v, x86_seg_ds, &ds);
- hvm_set_segment_register(v, x86_seg_ss, &ss);
- hvm_set_segment_register(v, x86_seg_es, &es);
- hvm_set_segment_register(v, x86_seg_tr, &tr);
-
- /* Sync AP's TSC with BSP's. */
- v->arch.hvm_vcpu.cache_tsc_offset =
- v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
- v->domain->arch.hvm_domain.sync_tsc);
-
- paging_update_paging_modes(v);
-
- v->is_initialised = 1;
- set_bit(_VPF_down, &v->pause_flags);
-
- return 0;
-}
-
int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
{
int rc;
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 0a3d0f4f7e..4dc0773a93 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -4,6 +4,7 @@ subdir-y += vmx
obj-y += asid.o
obj-y += dm.o
obj-bin-y += dom0_build.init.o
+obj-y += domain.o
obj-y += emulate.o
obj-y += hpet.o
obj-y += hvm.o
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
new file mode 100644
index 0000000000..c3358f4f8d
--- /dev/null
+++ b/xen/arch/x86/hvm/domain.c
@@ -0,0 +1,322 @@
+/******************************************************************************
+ * arch/x86/hvm/domain.c
+ *
+ * HVM-specific domain handling
+ */
+
+/*
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+
+#include <xen/domain_page.h>
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/paging.h>
+#include <xen/sched.h>
+
+#include <public/hvm/hvm_vcpu.h>
+
+static inline int check_segment(struct segment_register *reg,
+ enum x86_segment seg)
+{
+
+ if ( reg->attr.fields.pad != 0 )
+ {
+ gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n");
+ return -EINVAL;
+ }
+
+ if ( reg->attr.bytes == 0 )
+ {
+ if ( seg != x86_seg_ds && seg != x86_seg_es )
+ {
+ gprintk(XENLOG_ERR, "Null selector provided for CS, SS or TR\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if ( seg == x86_seg_tr )
+ {
+ if ( reg->attr.fields.s )
+ {
+ gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
+ return -EINVAL;
+ }
+
+ if ( reg->attr.fields.type != SYS_DESC_tss_busy )
+ {
+ gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
+ return -EINVAL;
+ }
+ }
+ else if ( !reg->attr.fields.s )
+ {
+ gprintk(XENLOG_ERR,
+ "System segment provided for a code or data segment\n");
+ return -EINVAL;
+ }
+
+ if ( !reg->attr.fields.p )
+ {
+ gprintk(XENLOG_ERR, "Non-present segment provided\n");
+ return -EINVAL;
+ }
+
+ if ( seg == x86_seg_cs && !(reg->attr.fields.type & 0x8) )
+ {
+ gprintk(XENLOG_ERR, "Non-code segment provided for CS\n");
+ return -EINVAL;
+ }
+
+ if ( seg == x86_seg_ss &&
+ ((reg->attr.fields.type & 0x8) || !(reg->attr.fields.type & 0x2)) )
+ {
+ gprintk(XENLOG_ERR, "Non-writeable segment provided for SS\n");
+ return -EINVAL;
+ }
+
+ if ( reg->attr.fields.s && seg != x86_seg_ss && seg != x86_seg_cs &&
+ (reg->attr.fields.type & 0x8) && !(reg->attr.fields.type & 0x2) )
+ {
+ gprintk(XENLOG_ERR, "Non-readable segment provided for DS or ES\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Called by VCPUOP_initialise for HVM guests. */
+int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
+{
+ struct cpu_user_regs *uregs = &v->arch.user_regs;
+ struct segment_register cs, ds, ss, es, tr;
+ const char *errstr;
+ int rc;
+
+ if ( ctx->pad != 0 )
+ return -EINVAL;
+
+ switch ( ctx->mode )
+ {
+ default:
+ return -EINVAL;
+
+ case VCPU_HVM_MODE_32B:
+ {
+ const struct vcpu_hvm_x86_32 *regs = &ctx->cpu_regs.x86_32;
+ uint32_t limit;
+
+ if ( ctx->cpu_regs.x86_32.pad1 != 0 ||
+ ctx->cpu_regs.x86_32.pad2[0] != 0 ||
+ ctx->cpu_regs.x86_32.pad2[1] != 0 ||
+ ctx->cpu_regs.x86_32.pad2[2] != 0 )
+ return -EINVAL;
+
+#define SEG(s, r) ({ \
+ s = (struct segment_register){ .base = (r)->s ## _base, \
+ .limit = (r)->s ## _limit, \
+ .attr.bytes = (r)->s ## _ar }; \
+ /* Set accessed / busy bit for present segments. */ \
+ if ( s.attr.fields.p ) \
+ s.attr.fields.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2); \
+ check_segment(&s, x86_seg_ ## s); })
+
+ rc = SEG(cs, regs);
+ rc |= SEG(ds, regs);
+ rc |= SEG(ss, regs);
+ rc |= SEG(es, regs);
+ rc |= SEG(tr, regs);
+#undef SEG
+
+ if ( rc != 0 )
+ return rc;
+
+ /* Basic sanity checks. */
+ limit = cs.limit;
+ if ( cs.attr.fields.g )
+ limit = (limit << 12) | 0xfff;
+ if ( regs->eip > limit )
+ {
+ gprintk(XENLOG_ERR, "EIP (%#08x) outside CS limit (%#08x)\n",
+ regs->eip, limit);
+ return -EINVAL;
+ }
+
+ if ( ss.attr.fields.dpl != cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
+ ss.attr.fields.dpl, cs.attr.fields.dpl);
+ return -EINVAL;
+ }
+
+ if ( ds.attr.fields.p && ds.attr.fields.dpl > cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
+ ds.attr.fields.dpl, cs.attr.fields.dpl);
+ return -EINVAL;
+ }
+
+ if ( es.attr.fields.p && es.attr.fields.dpl > cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "ES.DPL (%u) is greater than CS.DPL (%u)\n",
+ es.attr.fields.dpl, cs.attr.fields.dpl);
+ return -EINVAL;
+ }
+
+ if ( (regs->efer & EFER_LMA) && !(regs->efer & EFER_LME) )
+ {
+ gprintk(XENLOG_ERR, "EFER.LMA set without EFER.LME (%#016lx)\n",
+ regs->efer);
+ return -EINVAL;
+ }
+
+ uregs->rax = regs->eax;
+ uregs->rcx = regs->ecx;
+ uregs->rdx = regs->edx;
+ uregs->rbx = regs->ebx;
+ uregs->rsp = regs->esp;
+ uregs->rbp = regs->ebp;
+ uregs->rsi = regs->esi;
+ uregs->rdi = regs->edi;
+ uregs->rip = regs->eip;
+ uregs->rflags = regs->eflags;
+
+ v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
+ v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
+ v->arch.hvm_vcpu.guest_efer = regs->efer;
+ }
+ break;
+
+ case VCPU_HVM_MODE_64B:
+ {
+ const struct vcpu_hvm_x86_64 *regs = &ctx->cpu_regs.x86_64;
+
+ /* Basic sanity checks. */
+ if ( !is_canonical_address(regs->rip) )
+ {
+ gprintk(XENLOG_ERR, "RIP contains a non-canonical address (%#lx)\n",
+ regs->rip);
+ return -EINVAL;
+ }
+
+ if ( !(regs->cr0 & X86_CR0_PG) )
+ {
+ gprintk(XENLOG_ERR, "CR0 doesn't have paging enabled (%#016lx)\n",
+ regs->cr0);
+ return -EINVAL;
+ }
+
+ if ( !(regs->cr4 & X86_CR4_PAE) )
+ {
+ gprintk(XENLOG_ERR, "CR4 doesn't have PAE enabled (%#016lx)\n",
+ regs->cr4);
+ return -EINVAL;
+ }
+
+ if ( !(regs->efer & EFER_LME) )
+ {
+ gprintk(XENLOG_ERR, "EFER doesn't have LME enabled (%#016lx)\n",
+ regs->efer);
+ return -EINVAL;
+ }
+
+ uregs->rax = regs->rax;
+ uregs->rcx = regs->rcx;
+ uregs->rdx = regs->rdx;
+ uregs->rbx = regs->rbx;
+ uregs->rsp = regs->rsp;
+ uregs->rbp = regs->rbp;
+ uregs->rsi = regs->rsi;
+ uregs->rdi = regs->rdi;
+ uregs->rip = regs->rip;
+ uregs->rflags = regs->rflags;
+
+ v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
+ v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
+ v->arch.hvm_vcpu.guest_efer = regs->efer;
+
+#define SEG(l, a) (struct segment_register){ .limit = (l), .attr.bytes = (a) }
+ cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
+ ds = ss = es = SEG(~0u, 0xc93);
+ tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */
+#undef SEG
+ }
+ break;
+
+ }
+
+ if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
+ v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
+
+ if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(v, 0) )
+ {
+ gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
+ v->arch.hvm_vcpu.guest_cr[4]);
+ return -EINVAL;
+ }
+
+ errstr = hvm_efer_valid(v, v->arch.hvm_vcpu.guest_efer, -1);
+ if ( errstr )
+ {
+ gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
+ v->arch.hvm_vcpu.guest_efer, errstr);
+ return -EINVAL;
+ }
+
+ hvm_update_guest_cr(v, 0);
+ hvm_update_guest_cr(v, 3);
+ hvm_update_guest_cr(v, 4);
+ hvm_update_guest_efer(v);
+
+ if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
+ {
+ /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
+ struct page_info *page = get_page_from_gfn(v->domain,
+ v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
+ {
+ gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
+ v->arch.hvm_vcpu.guest_cr[3]);
+ return -EINVAL;
+ }
+
+ v->arch.guest_table = pagetable_from_page(page);
+ }
+
+ hvm_set_segment_register(v, x86_seg_cs, &cs);
+ hvm_set_segment_register(v, x86_seg_ds, &ds);
+ hvm_set_segment_register(v, x86_seg_ss, &ss);
+ hvm_set_segment_register(v, x86_seg_es, &es);
+ hvm_set_segment_register(v, x86_seg_tr, &tr);
+
+ /* Sync AP's TSC with BSP's. */
+ v->arch.hvm_vcpu.cache_tsc_offset =
+ v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+ hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ v->domain->arch.hvm_domain.sync_tsc);
+
+ paging_update_paging_modes(v);
+
+ v->is_initialised = 1;
+ set_bit(_VPF_down, &v->pause_flags);
+
+ return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
2.11.0