Subject: [PATCH][Resend] Split kvm_vcpu to support new archs.
From: Zhang, Xiantao @ 2007-10-12 10:39 UTC
  To: Avi Kivity
  Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	carsteno-tA70FqPdS9bQT0dZR+AlfA

Split kvm_vcpu to support new architectures. Define a new sub-field,
kvm_arch_vcpu, to hold the arch-specific parts of the vcpu state. Once
new architectures are merged upstream, we may want to rename x86.h to
kvm_arch.h, so that each architecture can provide its own kvm_arch.h.
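
For reviewers, a rough sketch of the intended layout follows. It is
reconstructed from the fields this patch removes from struct kvm_vcpu;
the x86.h hunk itself is not quoted in full below, so the exact field
names and ordering there are illustrative only:

struct kvm_arch_vcpu {
	u64 host_tsc;
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;	/* needs vcpu_load_rsp_rip() */
	unsigned long cr0, cr2, cr3, cr4, cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
	u64 ia32_misc_enable_msr;
	/* ... plus the fx images, rmode save area, cpuid entries and
	 * x86_emulate_ctxt that previously sat in struct kvm_vcpu. */
};

Generic code then reaches the x86 state through vcpu->arch, for example:

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_PG;
}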
From 07e382a392c51ee8b56a556e53ee816ecaebccb4 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Date: Fri, 12 Oct 2007 18:29:36 +0800

Signed-off-by: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
---
 drivers/kvm/ioapic.c      |   13 +-
 drivers/kvm/kvm.h         |   47 ++------
 drivers/kvm/kvm_main.c    |  287 ++++++++++++++++++++++-----------------------
 drivers/kvm/lapic.c       |   69 ++++++------
 drivers/kvm/mmu.c         |   12 +-
 drivers/kvm/paging_tmpl.h |    6 +-
 drivers/kvm/svm.c         |  107 ++++++++++-------
 drivers/kvm/vmx.c         |  190 +++++++++++++++---------------
 drivers/kvm/x86.c         |   13 +-
 drivers/kvm/x86.h         |   63 +++++++++--
 drivers/kvm/x86_emulate.c |   18 ++--
 11 files changed, 427 insertions(+), 398 deletions(-)

diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
index 3b69541..df67292 100644
--- a/drivers/kvm/ioapic.c
+++ b/drivers/kvm/ioapic.c
@@ -156,7 +156,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 	if (dest_mode == 0) {	/* Physical mode. */
 		if (dest == 0xFF) {	/* Broadcast. */
 			for (i = 0; i < KVM_MAX_VCPUS; ++i)
-				if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
+				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
 					mask |= 1 << i;
 			return mask;
 		}
@@ -164,8 +164,9 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 			vcpu = kvm->vcpus[i];
 			if (!vcpu)
 				continue;
-			if (kvm_apic_match_physical_addr(vcpu->apic, dest)) {
-				if (vcpu->apic)
+			if (kvm_apic_match_physical_addr(vcpu->arch.apic,
+								dest)) {
+				if (vcpu->arch.apic)
 					mask = 1 << i;
 				break;
 			}
@@ -175,8 +176,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 			vcpu = kvm->vcpus[i];
 			if (!vcpu)
 				continue;
-			if (vcpu->apic &&
-			    kvm_apic_match_logical_addr(vcpu->apic, dest))
+			if (vcpu->arch.apic &&
+			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
 				mask |= 1 << vcpu->vcpu_id;
 		}
 	ioapic_debug("mask %x", mask);
@@ -224,7 +225,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 			deliver_bitmask &= ~(1 << vcpu_id);
 			vcpu = ioapic->kvm->vcpus[vcpu_id];
 			if (vcpu) {
-				target = vcpu->apic;
+				target = vcpu->arch.apic;
 				ioapic_inj_irq(ioapic, target, vector,
 					       trig_mode, delivery_mode);
 			}
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 4a52d6e..fae78d6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -307,31 +307,21 @@ struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
 void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
 			     struct kvm_io_device *dev);
 
+
+#include "x86.h"
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 	struct preempt_notifier preempt_notifier;
 	int vcpu_id;
 	struct mutex mutex;
 	int   cpu;
-	u64 host_tsc;
 	struct kvm_run *run;
 	int interrupt_window_open;
 	int guest_mode;
 	unsigned long requests;
 	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
 	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
-	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
-	unsigned long rip;      /* needs vcpu_load_rsp_rip() */
-
-	unsigned long cr0;
-	unsigned long cr2;
-	unsigned long cr3;
-	unsigned long cr4;
-	unsigned long cr8;
-	u64 pdptrs[4]; /* pae */
-	u64 shadow_efer;
-	u64 apic_base;
-	struct kvm_lapic *apic;    /* kernel irqchip context */
 #define VCPU_MP_STATE_RUNNABLE          0
 #define VCPU_MP_STATE_UNINITIALIZED     1
 #define VCPU_MP_STATE_INIT_RECEIVED     2
@@ -339,7 +329,6 @@ struct kvm_vcpu {
 #define VCPU_MP_STATE_HALTED            4
 	int mp_state;
 	int sipi_vector;
-	u64 ia32_misc_enable_msr;
 
 	struct kvm_mmu mmu;
 
@@ -354,10 +343,6 @@ struct kvm_vcpu {
 
 	struct kvm_guest_debug guest_debug;
 
-	struct i387_fxsave_struct host_fx_image;
-	struct i387_fxsave_struct guest_fx_image;
-	int fpu_active;
-	int guest_fpu_loaded;
 
 	int mmio_needed;
 	int mmio_read_completed;
@@ -365,7 +350,6 @@ struct kvm_vcpu {
 	int mmio_size;
 	unsigned char mmio_data[8];
 	gpa_t mmio_phys_addr;
-	gva_t mmio_fault_cr2;
 	struct kvm_pio_request pio;
 	void *pio_data;
 	wait_queue_head_t wq;
@@ -375,24 +359,9 @@ struct kvm_vcpu {
 
 	struct kvm_stat stat;
 
-	struct {
-		int active;
-		u8 save_iopl;
-		struct kvm_save_segment {
-			u16 selector;
-			unsigned long base;
-			u32 limit;
-			u32 ar;
-		} tr, es, ds, fs, gs;
-	} rmode;
 	int halt_request; /* real mode on Intel only */
+	struct kvm_arch_vcpu arch; /* arch-specific fields */
 
-	int cpuid_nent;
-	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
-
-	/* emulate context */
-
-	struct x86_emulate_ctxt emulate_ctxt;
 };
 
 struct kvm_mem_alias {
@@ -689,7 +658,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	return vcpu->shadow_efer & EFER_LME;
+	return vcpu->arch.shadow_efer & EFER_LME;
 #else
 	return 0;
 #endif
@@ -697,17 +666,17 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr4 & X86_CR4_PAE;
+	return vcpu->arch.cr4 & X86_CR4_PAE;
 }
 
 static inline int is_pse(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr4 & X86_CR4_PSE;
+	return vcpu->arch.cr4 & X86_CR4_PSE;
 }
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & X86_CR0_PG;
+	return vcpu->arch.cr0 & X86_CR0_PG;
 }
 
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index a0f8366..b54d9e6 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -16,7 +16,6 @@
  */
 
 #include "kvm.h"
-#include "x86.h"
 #include "x86_emulate.h"
 #include "segment_descriptor.h"
 #include "irq.h"
@@ -153,23 +152,23 @@ static inline int valid_vcpu(int n)
 
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+	if (!vcpu->arch.fpu_active || vcpu->arch.guest_fpu_loaded)
 		return;
 
-	vcpu->guest_fpu_loaded = 1;
-	fx_save(&vcpu->host_fx_image);
-	fx_restore(&vcpu->guest_fx_image);
+	vcpu->arch.guest_fpu_loaded = 1;
+	fx_save(&vcpu->arch.host_fx_image);
+	fx_restore(&vcpu->arch.guest_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->guest_fpu_loaded)
+	if (!vcpu->arch.guest_fpu_loaded)
 		return;
 
-	vcpu->guest_fpu_loaded = 0;
-	fx_save(&vcpu->guest_fx_image);
-	fx_restore(&vcpu->host_fx_image);
+	vcpu->arch.guest_fpu_loaded = 0;
+	fx_save(&vcpu->arch.guest_fx_image);
+	fx_restore(&vcpu->arch.host_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
@@ -447,7 +446,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
 	int i;
 	int ret;
-	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
+	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
 	mutex_lock(&vcpu->kvm->lock);
 	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
@@ -464,7 +463,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	}
 	ret = 1;
 
-	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
+	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
 	mutex_unlock(&vcpu->kvm->lock);
 
@@ -475,7 +474,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, vcpu->cr0);
+		       cr0, vcpu->arch.cr0);
 		inject_gp(vcpu);
 		return;
 	}
@@ -495,7 +494,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-		if ((vcpu->shadow_efer & EFER_LME)) {
+		if ((vcpu->arch.shadow_efer & EFER_LME)) {
 			int cs_db, cs_l;
 
 			if (!is_pae(vcpu)) {
@@ -514,7 +513,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			}
 		} else
 #endif
-		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
+		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
 			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 			       "reserved bits\n");
 			inject_gp(vcpu);
@@ -524,7 +523,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	}
 
 	kvm_x86_ops->set_cr0(vcpu, cr0);
-	vcpu->cr0 = cr0;
+	vcpu->arch.cr0 = cr0;
 
 	mutex_lock(&vcpu->kvm->lock);
 	kvm_mmu_reset_context(vcpu);
@@ -535,7 +534,7 @@ EXPORT_SYMBOL_GPL(set_cr0);
 
 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
+	set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(lmsw);
 
@@ -555,7 +554,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return;
 		}
 	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
-		   && !load_pdptrs(vcpu, vcpu->cr3)) {
+		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		inject_gp(vcpu);
 		return;
@@ -567,7 +566,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return;
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
-	vcpu->cr4 = cr4;
+	vcpu->arch.cr4 = cr4;
 	mutex_lock(&vcpu->kvm->lock);
 	kvm_mmu_reset_context(vcpu);
 	mutex_unlock(&vcpu->kvm->lock);
@@ -616,7 +615,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
 		inject_gp(vcpu);
 	else {
-		vcpu->cr3 = cr3;
+		vcpu->arch.cr3 = cr3;
 		vcpu->mmu.new_cr3(vcpu);
 	}
 	mutex_unlock(&vcpu->kvm->lock);
@@ -633,7 +632,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 	if (irqchip_in_kernel(vcpu->kvm))
 		kvm_lapic_set_tpr(vcpu, cr8);
 	else
-		vcpu->cr8 = cr8;
+		vcpu->arch.cr8 = cr8;
 }
 EXPORT_SYMBOL_GPL(set_cr8);
 
@@ -642,16 +641,16 @@ unsigned long get_cr8(struct kvm_vcpu *vcpu)
 	if (irqchip_in_kernel(vcpu->kvm))
 		return kvm_lapic_get_cr8(vcpu);
 	else
-		return vcpu->cr8;
+		return vcpu->arch.cr8;
 }
 EXPORT_SYMBOL_GPL(get_cr8);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 {
 	if (irqchip_in_kernel(vcpu->kvm))
-		return vcpu->apic_base;
+		return vcpu->arch.apic_base;
 	else
-		return vcpu->apic_base;
+		return vcpu->arch.apic_base;
 }
 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 
@@ -661,7 +660,7 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 	if (irqchip_in_kernel(vcpu->kvm))
 		kvm_lapic_set_base(vcpu, data);
 	else
-		vcpu->apic_base = data;
+		vcpu->arch.apic_base = data;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
@@ -671,16 +670,16 @@ void fx_init(struct kvm_vcpu *vcpu)
 
 	/* Initialize guest FPU by resetting ours and saving into guest's */
 	preempt_disable();
-	fx_save(&vcpu->host_fx_image);
+	fx_save(&vcpu->arch.host_fx_image);
 	fpu_init();
-	fx_save(&vcpu->guest_fx_image);
-	fx_restore(&vcpu->host_fx_image);
+	fx_save(&vcpu->arch.guest_fx_image);
+	fx_restore(&vcpu->arch.host_fx_image);
 	preempt_enable();
 
-	vcpu->cr0 |= X86_CR0_ET;
+	vcpu->arch.cr0 |= X86_CR0_ET;
 	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
-	vcpu->guest_fx_image.mxcsr = 0x1f80;
-	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
+	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
 	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
 }
 EXPORT_SYMBOL_GPL(fx_init);
@@ -1239,8 +1238,8 @@ static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
 {
 	struct kvm_io_device *dev;
 
-	if (vcpu->apic) {
-		dev = &vcpu->apic->dev;
+	if (vcpu->arch.apic) {
+		dev = &vcpu->arch.apic->dev;
 		if (dev->in_range(dev, addr))
 			return dev;
 	}
@@ -1395,8 +1394,8 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-	vcpu->cr0 &= ~X86_CR0_TS;
-	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0);
+	vcpu->arch.cr0 &= ~X86_CR0_TS;
+	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0);
 	return X86EMUL_CONTINUE;
 }
 
@@ -1431,7 +1430,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
 	static int reported;
 	u8 opcodes[4];
-	unsigned long rip = vcpu->rip;
+	unsigned long rip = vcpu->arch.rip;
 	unsigned long rip_linear;
 
 	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
@@ -1463,7 +1462,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 {
 	int r;
 
-	vcpu->mmio_fault_cr2 = cr2;
+	vcpu->arch.mmio_fault_cr2 = cr2;
 	kvm_x86_ops->cache_regs(vcpu);
 
 	vcpu->mmio_is_write = 0;
@@ -1473,37 +1472,37 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 		int cs_db, cs_l;
 		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
-		vcpu->emulate_ctxt.vcpu = vcpu;
-		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
-		vcpu->emulate_ctxt.cr2 = cr2;
-		vcpu->emulate_ctxt.mode =
-			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
+		vcpu->arch.emulate_ctxt.vcpu = vcpu;
+		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+		vcpu->arch.emulate_ctxt.cr2 = cr2;
+		vcpu->arch.emulate_ctxt.mode =
+			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
 			? X86EMUL_MODE_REAL : cs_l
 			? X86EMUL_MODE_PROT64 :	cs_db
 			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 
-		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
-			vcpu->emulate_ctxt.cs_base = 0;
-			vcpu->emulate_ctxt.ds_base = 0;
-			vcpu->emulate_ctxt.es_base = 0;
-			vcpu->emulate_ctxt.ss_base = 0;
+		if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
+			vcpu->arch.emulate_ctxt.cs_base = 0;
+			vcpu->arch.emulate_ctxt.ds_base = 0;
+			vcpu->arch.emulate_ctxt.es_base = 0;
+			vcpu->arch.emulate_ctxt.ss_base = 0;
 		} else {
-			vcpu->emulate_ctxt.cs_base =
+			vcpu->arch.emulate_ctxt.cs_base =
 					get_segment_base(vcpu, VCPU_SREG_CS);
-			vcpu->emulate_ctxt.ds_base =
+			vcpu->arch.emulate_ctxt.ds_base =
 					get_segment_base(vcpu, VCPU_SREG_DS);
-			vcpu->emulate_ctxt.es_base =
+			vcpu->arch.emulate_ctxt.es_base =
 					get_segment_base(vcpu, VCPU_SREG_ES);
-			vcpu->emulate_ctxt.ss_base =
+			vcpu->arch.emulate_ctxt.ss_base =
 					get_segment_base(vcpu, VCPU_SREG_SS);
 		}
 
-		vcpu->emulate_ctxt.gs_base =
+		vcpu->arch.emulate_ctxt.gs_base =
 					get_segment_base(vcpu, VCPU_SREG_GS);
-		vcpu->emulate_ctxt.fs_base =
+		vcpu->arch.emulate_ctxt.fs_base =
 					get_segment_base(vcpu, VCPU_SREG_FS);
 
-		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
+		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
 		if (r)  {
 			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
 				return EMULATE_DONE;
@@ -1511,7 +1510,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);
+	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
 
 	if (vcpu->pio.string)
 		return EMULATE_DO_MMIO;
@@ -1535,7 +1534,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	}
 
 	kvm_x86_ops->decache_regs(vcpu);
-	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
+	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 
 	if (vcpu->mmio_is_write) {
 		vcpu->mmio_needed = 0;
@@ -1594,11 +1593,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 
 	kvm_x86_ops->cache_regs(vcpu);
 
-	nr = vcpu->regs[VCPU_REGS_RAX];
-	a0 = vcpu->regs[VCPU_REGS_RBX];
-	a1 = vcpu->regs[VCPU_REGS_RCX];
-	a2 = vcpu->regs[VCPU_REGS_RDX];
-	a3 = vcpu->regs[VCPU_REGS_RSI];
+	nr = vcpu->arch.regs[VCPU_REGS_RAX];
+	a0 = vcpu->arch.regs[VCPU_REGS_RBX];
+	a1 = vcpu->arch.regs[VCPU_REGS_RCX];
+	a2 = vcpu->arch.regs[VCPU_REGS_RDX];
+	a3 = vcpu->arch.regs[VCPU_REGS_RSI];
 
 	if (!is_long_mode(vcpu)) {
 		nr &= 0xFFFFFFFF;
@@ -1613,7 +1612,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = -KVM_ENOSYS;
 		break;
 	}
-	vcpu->regs[VCPU_REGS_RAX] = ret;
+	vcpu->arch.regs[VCPU_REGS_RAX] = ret;
 	kvm_x86_ops->decache_regs(vcpu);
 	return 0;
 }
@@ -1635,7 +1634,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 
 	kvm_x86_ops->cache_regs(vcpu);
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
-	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
+	if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
 	    != X86EMUL_CONTINUE)
 		ret = -EFAULT;
 
@@ -1675,13 +1674,13 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 	switch (cr) {
 	case 0:
-		return vcpu->cr0;
+		return vcpu->arch.cr0;
 	case 2:
-		return vcpu->cr2;
+		return vcpu->arch.cr2;
 	case 3:
-		return vcpu->cr3;
+		return vcpu->arch.cr3;
 	case 4:
-		return vcpu->cr4;
+		return vcpu->arch.cr4;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
 		return 0;
@@ -1693,17 +1692,17 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 {
 	switch (cr) {
 	case 0:
-		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
+		set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
 		*rflags = kvm_x86_ops->get_rflags(vcpu);
 		break;
 	case 2:
-		vcpu->cr2 = val;
+		vcpu->arch.cr2 = val;
 		break;
 	case 3:
 		set_cr3(vcpu, val);
 		break;
 	case 4:
-		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
+		set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
@@ -1743,11 +1742,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		data = kvm_get_apic_base(vcpu);
 		break;
 	case MSR_IA32_MISC_ENABLE:
-		data = vcpu->ia32_misc_enable_msr;
+		data = vcpu->arch.ia32_misc_enable_msr;
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_EFER:
-		data = vcpu->shadow_efer;
+		data = vcpu->arch.shadow_efer;
 		break;
 #endif
 	default:
@@ -1781,7 +1780,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	}
 
 	if (is_paging(vcpu)
-	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
 		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 		inject_gp(vcpu);
 		return;
@@ -1790,9 +1789,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	kvm_x86_ops->set_efer(vcpu, efer);
 
 	efer &= ~EFER_LMA;
-	efer |= vcpu->shadow_efer & EFER_LMA;
+	efer |= vcpu->arch.shadow_efer & EFER_LMA;
 
-	vcpu->shadow_efer = efer;
+	vcpu->arch.shadow_efer = efer;
 }
 
 #endif
@@ -1821,7 +1820,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		kvm_set_apic_base(vcpu, data);
 		break;
 	case MSR_IA32_MISC_ENABLE:
-		vcpu->ia32_misc_enable_msr = data;
+		vcpu->arch.ia32_misc_enable_msr = data;
 		break;
 	default:
 		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
@@ -1856,14 +1855,14 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	struct kvm_cpuid_entry *e, *best;
 
 	kvm_x86_ops->cache_regs(vcpu);
-	function = vcpu->regs[VCPU_REGS_RAX];
-	vcpu->regs[VCPU_REGS_RAX] = 0;
-	vcpu->regs[VCPU_REGS_RBX] = 0;
-	vcpu->regs[VCPU_REGS_RCX] = 0;
-	vcpu->regs[VCPU_REGS_RDX] = 0;
+	function = vcpu->arch.regs[VCPU_REGS_RAX];
+	vcpu->arch.regs[VCPU_REGS_RAX] = 0;
+	vcpu->arch.regs[VCPU_REGS_RBX] = 0;
+	vcpu->arch.regs[VCPU_REGS_RCX] = 0;
+	vcpu->arch.regs[VCPU_REGS_RDX] = 0;
 	best = NULL;
-	for (i = 0; i < vcpu->cpuid_nent; ++i) {
-		e = &vcpu->cpuid_entries[i];
+	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+		e = &vcpu->arch.cpuid_entries[i];
 		if (e->function == function) {
 			best = e;
 			break;
@@ -1876,10 +1875,10 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 				best = e;
 	}
 	if (best) {
-		vcpu->regs[VCPU_REGS_RAX] = best->eax;
-		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
-		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
-		vcpu->regs[VCPU_REGS_RDX] = best->edx;
+		vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
+		vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
+		vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
+		vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
 	}
 	kvm_x86_ops->decache_regs(vcpu);
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -1921,7 +1920,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
 
 	if (!io->string) {
 		if (io->in)
-			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
+			memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->pio_data,
 			       io->size);
 	} else {
 		if (io->in) {
@@ -1939,15 +1938,15 @@ static int complete_pio(struct kvm_vcpu *vcpu)
 			 * The size of the register should really depend on
 			 * current address size.
 			 */
-			vcpu->regs[VCPU_REGS_RCX] -= delta;
+			vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
 		}
 		if (io->down)
 			delta = -delta;
 		delta *= io->size;
 		if (io->in)
-			vcpu->regs[VCPU_REGS_RDI] += delta;
+			vcpu->arch.regs[VCPU_REGS_RDI] += delta;
 		else
-			vcpu->regs[VCPU_REGS_RSI] += delta;
+			vcpu->arch.regs[VCPU_REGS_RSI] += delta;
 	}
 
 	kvm_x86_ops->decache_regs(vcpu);
@@ -2011,7 +2010,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	vcpu->pio.rep = 0;
 
 	kvm_x86_ops->cache_regs(vcpu);
-	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
+	memcpy(vcpu->pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
 	kvm_x86_ops->decache_regs(vcpu);
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2205,7 +2204,7 @@ again:
 	 */
 	if (unlikely(prof_on == KVM_PROFILING)) {
 		kvm_x86_ops->cache_regs(vcpu);
-		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
+		profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
 	}
 
 	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
@@ -2266,7 +2265,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
 		r = emulate_instruction(vcpu, kvm_run,
-					vcpu->mmio_fault_cr2, 0, 1);
+					vcpu->arch.mmio_fault_cr2, 0, 1);
 		if (r == EMULATE_DO_MMIO) {
 			/*
 			 * Read-modify-write.  Back to userspace.
@@ -2278,7 +2277,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
 		kvm_x86_ops->cache_regs(vcpu);
-		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
+		vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
 		kvm_x86_ops->decache_regs(vcpu);
 	}
 
@@ -2299,26 +2298,26 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
 
 	kvm_x86_ops->cache_regs(vcpu);
 
-	regs->rax = vcpu->regs[VCPU_REGS_RAX];
-	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
-	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
-	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
-	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
-	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
-	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
-	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
+	regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
+	regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
+	regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
+	regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
+	regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
+	regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
 #ifdef CONFIG_X86_64
-	regs->r8 = vcpu->regs[VCPU_REGS_R8];
-	regs->r9 = vcpu->regs[VCPU_REGS_R9];
-	regs->r10 = vcpu->regs[VCPU_REGS_R10];
-	regs->r11 = vcpu->regs[VCPU_REGS_R11];
-	regs->r12 = vcpu->regs[VCPU_REGS_R12];
-	regs->r13 = vcpu->regs[VCPU_REGS_R13];
-	regs->r14 = vcpu->regs[VCPU_REGS_R14];
-	regs->r15 = vcpu->regs[VCPU_REGS_R15];
+	regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
+	regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
+	regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
+	regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
+	regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
+	regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
+	regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
+	regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
 #endif
 
-	regs->rip = vcpu->rip;
+	regs->rip = vcpu->arch.rip;
 	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 
 	/*
@@ -2337,26 +2336,26 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
 {
 	vcpu_load(vcpu);
 
-	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
-	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
-	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
-	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
-	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
-	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
-	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
-	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
+	vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
+	vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
+	vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
+	vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
+	vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
+	vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
+	vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
+	vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
 #ifdef CONFIG_X86_64
-	vcpu->regs[VCPU_REGS_R8] = regs->r8;
-	vcpu->regs[VCPU_REGS_R9] = regs->r9;
-	vcpu->regs[VCPU_REGS_R10] = regs->r10;
-	vcpu->regs[VCPU_REGS_R11] = regs->r11;
-	vcpu->regs[VCPU_REGS_R12] = regs->r12;
-	vcpu->regs[VCPU_REGS_R13] = regs->r13;
-	vcpu->regs[VCPU_REGS_R14] = regs->r14;
-	vcpu->regs[VCPU_REGS_R15] = regs->r15;
+	vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
+	vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
+	vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
+	vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
+	vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
+	vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
+	vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
+	vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
 #endif
 
-	vcpu->rip = regs->rip;
+	vcpu->arch.rip = regs->rip;
 	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
 
 	kvm_x86_ops->decache_regs(vcpu);
@@ -2398,12 +2397,12 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->gdt.base = dt.base;
 
 	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
-	sregs->cr0 = vcpu->cr0;
-	sregs->cr2 = vcpu->cr2;
-	sregs->cr3 = vcpu->cr3;
-	sregs->cr4 = vcpu->cr4;
+	sregs->cr0 = vcpu->arch.cr0;
+	sregs->cr2 = vcpu->arch.cr2;
+	sregs->cr3 = vcpu->arch.cr3;
+	sregs->cr4 = vcpu->arch.cr4;
 	sregs->cr8 = get_cr8(vcpu);
-	sregs->efer = vcpu->shadow_efer;
+	sregs->efer = vcpu->arch.shadow_efer;
 	sregs->apic_base = kvm_get_apic_base(vcpu);
 
 	if (irqchip_in_kernel(vcpu->kvm)) {
@@ -2444,13 +2443,13 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	dt.base = sregs->gdt.base;
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
-	vcpu->cr2 = sregs->cr2;
-	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
-	vcpu->cr3 = sregs->cr3;
+	vcpu->arch.cr2 = sregs->cr2;
+	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+	vcpu->arch.cr3 = sregs->cr3;
 
 	set_cr8(vcpu, sregs->cr8);
 
-	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
+	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
 #ifdef CONFIG_X86_64
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 #endif
@@ -2458,14 +2457,14 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 
-	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
-	vcpu->cr0 = sregs->cr0;
+	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
+	vcpu->arch.cr0 = sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
 
-	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
+	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
 	if (!is_long_mode(vcpu) && is_pae(vcpu))
-		load_pdptrs(vcpu, vcpu->cr3);
+		load_pdptrs(vcpu, vcpu->arch.cr3);
 
 	if (mmu_reset_needed)
 		kvm_mmu_reset_context(vcpu);
@@ -2650,7 +2649,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
 	/* We do fxsave: this must be aligned. */
-	BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
 
 	vcpu_load(vcpu);
 	r = kvm_mmu_setup(vcpu);
@@ -2722,7 +2721,7 @@ struct fxsave {
 
 static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
 
 	vcpu_load(vcpu);
 
@@ -2742,7 +2741,7 @@ static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 
 static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
 
 	vcpu_load(vcpu);
 
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
index 2093073..ad2ebd3 100644
--- a/drivers/kvm/lapic.c
+++ b/drivers/kvm/lapic.c
@@ -88,7 +88,7 @@ static inline void apic_clear_vector(int vec, void *bitmap)
 
 static inline int apic_hw_enabled(struct kvm_lapic *apic)
 {
-	return (apic)->vcpu->apic_base & MSR_IA32_APICBASE_ENABLE;
+	return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
 }
 
 static inline int  apic_sw_enabled(struct kvm_lapic *apic)
@@ -172,7 +172,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int highest_irr;
 
 	if (!apic)
@@ -268,7 +268,7 @@ static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 			   int short_hand, int dest, int dest_mode)
 {
 	int result = 0;
-	struct kvm_lapic *target = vcpu->apic;
+	struct kvm_lapic *target = vcpu->arch.apic;
 
 	apic_debug("target %p, source %p, dest 0x%x, "
 		   "dest_mode 0x%x, short_hand 0x%x",
@@ -407,7 +407,7 @@ struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
 			next = 0;
 		if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
 			continue;
-		apic = kvm->vcpus[next]->apic;
+		apic = kvm->vcpus[next]->arch.apic;
 		if (apic && apic_enabled(apic))
 			break;
 		apic = NULL;
@@ -467,13 +467,14 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 		if (!vcpu)
 			continue;
 
-		if (vcpu->apic &&
+		if (vcpu->arch.apic &&
 		    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
 			if (delivery_mode == APIC_DM_LOWEST)
 				set_bit(vcpu->vcpu_id, &lpr_map);
 			else
-				__apic_accept_irq(vcpu->apic, delivery_mode,
-						  vector, level, trig_mode);
+				__apic_accept_irq(vcpu->arch.apic,
+					delivery_mode, vector, level,
+					trig_mode);
 		}
 	}
 
@@ -741,15 +742,15 @@ static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
 
 void kvm_free_lapic(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->apic)
+	if (!vcpu->arch.apic)
 		return;
 
-	hrtimer_cancel(&vcpu->apic->timer.dev);
+	hrtimer_cancel(&vcpu->arch.apic->timer.dev);
 
-	if (vcpu->apic->regs_page)
-		__free_page(vcpu->apic->regs_page);
+	if (vcpu->arch.apic->regs_page)
+		__free_page(vcpu->arch.apic->regs_page);
 
-	kfree(vcpu->apic);
+	kfree(vcpu->arch.apic);
 }
 
 /*
@@ -760,7 +761,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
 
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (!apic)
 		return;
@@ -769,7 +770,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	u64 tpr;
 
 	if (!apic)
@@ -782,18 +783,18 @@ EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
 
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (!apic) {
 		value |= MSR_IA32_APICBASE_BSP;
-		vcpu->apic_base = value;
+		vcpu->arch.apic_base = value;
 		return;
 	}
 	if (apic->vcpu->vcpu_id)
 		value &= ~MSR_IA32_APICBASE_BSP;
 
-	vcpu->apic_base = value;
-	apic->base_address = apic->vcpu->apic_base &
+	vcpu->arch.apic_base = value;
+	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
 
 	/* with FSB delivery interrupt, we can restart APIC functionality */
@@ -804,7 +805,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 
 u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
 {
-	return vcpu->apic_base;
+	return vcpu->arch.apic_base;
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
 
@@ -816,7 +817,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	apic_debug("%s\n", __FUNCTION__);
 
 	ASSERT(vcpu);
-	apic = vcpu->apic;
+	apic = vcpu->arch.apic;
 	ASSERT(apic != NULL);
 
 	/* Stop the timer in case it's a reset to an active apic */
@@ -847,19 +848,19 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	apic->timer.divide_count = 0;
 	atomic_set(&apic->timer.pending, 0);
 	if (vcpu->vcpu_id == 0)
-		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+		vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
 	apic_update_ppr(apic);
 
 	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
 		   "0x%016" PRIx64 ", base_address=0x%0lx.\n",
__FUNCTION__,
 		   vcpu, kvm_apic_id(apic),
-		   vcpu->apic_base, apic->base_address);
+		   vcpu->arch.apic_base, apic->base_address);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_reset);
 
 int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int ret = 0;
 
 	if (!apic)
@@ -930,7 +931,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	if (!apic)
 		goto nomem;
 
-	vcpu->apic = apic;
+	vcpu->arch.apic = apic;
 
 	apic->regs_page = alloc_page(GFP_KERNEL);
 	if (apic->regs_page == NULL) {
@@ -945,7 +946,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	apic->timer.dev.function = apic_timer_fn;
 	apic->base_address = APIC_DEFAULT_PHYS_BASE;
-	vcpu->apic_base = APIC_DEFAULT_PHYS_BASE;
+	vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
 
 	kvm_lapic_reset(vcpu);
 	apic->dev.read = apic_mmio_read;
@@ -963,7 +964,7 @@ EXPORT_SYMBOL_GPL(kvm_create_lapic);
 
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int highest_irr;
 
 	if (!apic || !apic_enabled(apic))
@@ -979,11 +980,11 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
-	u32 lvt0 = apic_get_reg(vcpu->apic, APIC_LVT0);
+	u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
 	int r = 0;
 
 	if (vcpu->vcpu_id == 0) {
-		if (!apic_hw_enabled(vcpu->apic))
+		if (!apic_hw_enabled(vcpu->arch.apic))
 			r = 1;
 		if ((lvt0 & APIC_LVT_MASKED) == 0 &&
 		    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
@@ -994,7 +995,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
 		atomic_read(&apic->timer.pending) > 0) {
@@ -1005,7 +1006,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 
 void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
 		apic->timer.last_update = ktime_add_ns(
@@ -1016,7 +1017,7 @@ void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 {
 	int vector = kvm_apic_has_interrupt(vcpu);
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (vector == -1)
 		return -1;
@@ -1029,9 +1030,9 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	apic->base_address = vcpu->apic_base &
+	apic->base_address = vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
 	apic_set_reg(apic, APIC_LVR, APIC_VERSION);
 	apic_update_ppr(apic);
@@ -1042,7 +1043,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 
 void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	struct hrtimer *timer;
 
 	if (!apic)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index f52604a..b1d0637 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & X86_CR0_WP;
+	return vcpu->arch.cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
@@ -179,7 +179,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-	return vcpu->shadow_efer & EFER_NX;
+	return vcpu->arch.shadow_efer & EFER_NX;
 }
 
 static int is_present_pte(unsigned long pte)
@@ -960,7 +960,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn;
 	struct kvm_mmu_page *page;
 
-	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
 	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
@@ -980,11 +980,11 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-			if (!is_present_pte(vcpu->pdptrs[i])) {
+			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
 				vcpu->mmu.pae_root[i] = 0;
 				continue;
 			}
-			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
+			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
 		} else if (vcpu->mmu.root_level == 0)
 			root_gfn = 0;
 		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
@@ -1053,7 +1053,7 @@ static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
+	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
 	mmu_free_roots(vcpu);
 }
 
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index a9e687b..f5c9399 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -103,10 +103,10 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 	walker->table = NULL;
 	walker->page = NULL;
 	walker->ptep = NULL;
-	root = vcpu->cr3;
+	root = vcpu->arch.cr3;
 #if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
-		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
+		walker->ptep = &vcpu->arch.pdptrs[(addr >> 30) & 3];
 		root = *walker->ptep;
 		walker->pte = root;
 		if (!(root & PT_PRESENT_MASK))
@@ -124,7 +124,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 	walker->table = kmap_atomic(walker->page, KM_USER0);
 
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
 
 	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
 
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index f643379..67918e8 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -18,6 +18,7 @@
 #include "x86_emulate.h"
 #include "irq.h"
 
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
@@ -188,7 +189,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		efer &= ~KVM_EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
-	vcpu->shadow_efer = efer;
+	vcpu->arch.shadow_efer = efer;
 }
 
 static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
@@ -235,7 +236,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		       svm->vmcb->save.rip,
 		       svm->next_rip);
 
-	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
 	vcpu->interrupt_window_open = 1;
@@ -592,10 +593,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	init_vmcb(svm->vmcb);
 
 	fx_init(&svm->vcpu);
-	svm->vcpu.fpu_active = 1;
-	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	svm->vcpu.arch.fpu_active = 1;
+	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (svm->vcpu.vcpu_id == 0)
-		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
 	return &svm->vcpu;
 
@@ -629,7 +630,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * increasing TSC.
 		 */
 		rdtscll(tsc_this);
-		delta = vcpu->host_tsc - tsc_this;
+		delta = vcpu->arch.host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 		kvm_migrate_apic_timer(vcpu);
@@ -647,7 +648,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
-	rdtscll(vcpu->host_tsc);
+	rdtscll(vcpu->arch.host_tsc);
 }
 
 static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
@@ -658,17 +659,17 @@ static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->rip = svm->vmcb->save.rip;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.rip = svm->vmcb->save.rip;
 }
 
 static void svm_decache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
-	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
-	svm->vmcb->save.rip = vcpu->rip;
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.rip;
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -766,24 +767,24 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->shadow_efer & KVM_EFER_LME) {
+	if (vcpu->arch.shadow_efer & KVM_EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer |= KVM_EFER_LMA;
+			vcpu->arch.shadow_efer |= KVM_EFER_LMA;
 			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer &= ~KVM_EFER_LMA;
+			vcpu->arch.shadow_efer &= ~KVM_EFER_LMA;
 			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 		}
 	}
 #endif
-	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-		vcpu->fpu_active = 1;
+		vcpu->arch.fpu_active = 1;
 	}
 
-	vcpu->cr0 = cr0;
+	vcpu->arch.cr0 = cr0;
 	cr0 |= X86_CR0_PG | X86_CR0_WP;
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
@@ -791,7 +792,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-       vcpu->cr4 = cr4;
+       vcpu->arch.cr4 = cr4;
        to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
@@ -896,7 +897,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 		svm->db_regs[dr] = value;
 		return;
 	case 4 ... 5:
-		if (vcpu->cr4 & X86_CR4_DE) {
+		if (vcpu->arch.cr4 & X86_CR4_DE) {
 			*exception = UD_VECTOR;
 			return;
 		}
@@ -977,9 +978,9 @@ static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.cr0 & X86_CR0_TS))
+	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
-	svm->vcpu.fpu_active = 1;
+	svm->vcpu.arch.fpu_active = 1;
 
 	return 1;
 }
@@ -1121,14 +1122,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
 		svm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
-		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
+		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
@@ -1181,9 +1182,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
-		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
+		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		svm_inject_gp(&svm->vcpu, 0);
@@ -1468,7 +1469,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	svm->host_cr2 = kvm_read_cr2();
 	svm->host_dr6 = read_dr6();
 	svm->host_dr7 = read_dr7();
-	svm->vmcb->save.cr2 = vcpu->cr2;
+	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	if (svm->vmcb->save.dr7 & 0xff) {
 		write_dr7(0);
@@ -1568,21 +1569,35 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		:
 		: [svm]"a"(svm),
 		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
-		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
-		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
-		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
-		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
-		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
-		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
+		  [rbx]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_RBX])),
+		  [rcx]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_RCX])),
+		  [rdx]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_RDX])),
+		  [rsi]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_RSI])),
+		  [rdi]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_RDI])),
+		  [rbp]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_RBP]))
 #ifdef CONFIG_X86_64
-		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
-		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
-		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
-		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
-		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
-		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
-		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
-		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
+		  , [r8]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R8])),
+		  [r9]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R9])),
+		  [r10]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R10])),
+		  [r11]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R11])),
+		  [r12]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R12])),
+		  [r13]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R13])),
+		  [r14]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R14])),
+		  [r15]"i"(offsetof(struct vcpu_svm,
+					vcpu.arch.regs[VCPU_REGS_R15]))
 #endif
 		: "cc", "memory");
 
@@ -1593,7 +1608,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if ((svm->vmcb->save.dr7 & 0xff))
 		load_db_regs(svm->host_db_regs);
 
-	vcpu->cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 
 	write_dr6(svm->host_dr6);
 	write_dr7(svm->host_dr7);
@@ -1616,10 +1631,10 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 	svm->vmcb->save.cr3 = root;
 	force_new_asid(vcpu);
 
-	if (vcpu->fpu_active) {
+	if (vcpu->arch.fpu_active) {
 		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
 		svm->vmcb->save.cr0 |= X86_CR0_TS;
-		vcpu->fpu_active = 0;
+		vcpu->arch.fpu_active = 0;
 	}
 }
 
@@ -1641,7 +1656,7 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
 						DF_VECTOR;
 		return;
 	}
-	vcpu->cr2 = addr;
+	vcpu->arch.cr2 = addr;
 	svm->vmcb->save.cr2 = addr;
 	svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
 					SVM_EVTINJ_VALID_ERR |
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 2d75599..a5e20ee 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -14,7 +14,6 @@
  * the COPYING file in the top-level directory.
  *
  */
-
 #include "kvm.h"
 #include "x86_emulate.h"
 #include "irq.h"
@@ -220,7 +219,7 @@ static void __vcpu_clear(void *arg)
 		vmcs_clear(vmx->vmcs);
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
-	rdtscll(vmx->vcpu.host_tsc);
+	rdtscll(vmx->vcpu.arch.host_tsc);
 }
 
 static void vcpu_clear(struct vcpu_vmx *vmx)
@@ -312,11 +311,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	u32 eb;
 
 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
-	if (!vcpu->fpu_active)
+	if (!vcpu->arch.fpu_active)
 		eb |= 1u << NM_VECTOR;
 	if (vcpu->guest_debug.enabled)
 		eb |= 1u << 1;
-	if (vcpu->rmode.active)
+	if (vcpu->arch.rmode.active)
 		eb = ~0;
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -498,7 +497,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * Make sure the time stamp counter is monotonous.
 		 */
 		rdtscll(tsc_this);
-		delta = vcpu->host_tsc - tsc_this;
+		delta = vcpu->arch.host_tsc - tsc_this;
 		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
 	}
 }
@@ -511,20 +510,20 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->fpu_active)
+	if (vcpu->arch.fpu_active)
 		return;
-	vcpu->fpu_active = 1;
+	vcpu->arch.fpu_active = 1;
 	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
-	if (vcpu->cr0 & X86_CR0_TS)
+	if (vcpu->arch.cr0 & X86_CR0_TS)
 		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
 
 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->fpu_active)
+	if (!vcpu->arch.fpu_active)
 		return;
-	vcpu->fpu_active = 0;
+	vcpu->arch.fpu_active = 0;
 	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
@@ -632,7 +631,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		 * if efer.sce is enabled.
 		 */
 		index = __find_msr_index(vmx, MSR_K6_STAR);
-		if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
 			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
@@ -776,12 +775,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
 /*
  * Sync the rsp and rip registers into the vcpu structure.  This allows
- * registers to be accessed by indexing vcpu->regs.
+ * registers to be accessed by indexing vcpu->arch.regs.
  */
 static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
 {
-	vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
-	vcpu->rip = vmcs_readl(GUEST_RIP);
+	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
 }
 
 /*
@@ -790,8 +789,8 @@ static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
  */
 static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
 {
-	vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
-	vmcs_writel(GUEST_RIP, vcpu->rip);
+	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
 }
 
 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -1069,15 +1068,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
-	vcpu->rmode.active = 0;
+	vcpu->arch.rmode.active = 0;
 
-	vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
-	vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
-	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
+	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
+	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
+	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
 	flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
-	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
+	flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
 	vmcs_writel(GUEST_RFLAGS, flags);
 
 	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1085,10 +1084,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 
 	update_exception_bitmap(vcpu);
 
-	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
-	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
-	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
-	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
+	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
+	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
+	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
+	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
 	vmcs_write16(GUEST_SS_SELECTOR, 0);
 	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -1121,19 +1120,19 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
-	vcpu->rmode.active = 1;
+	vcpu->arch.rmode.active = 1;
 
-	vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
+	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
 	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
 
-	vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
+	vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
 
-	vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
+	vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
-	vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
+	vcpu->arch.rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
 
 	flags |= IOPL_MASK | X86_EFLAGS_VM;
 
@@ -1151,12 +1150,11 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 		vmcs_writel(GUEST_CS_BASE, 0xf0000);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
 
-	fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
-	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
-	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
-	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+	fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
+	fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
+	fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
+	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
-	kvm_mmu_reset_context(vcpu);
 	init_rmode_tss(vcpu->kvm);
 }
 
@@ -1175,7 +1173,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 			     | AR_TYPE_BUSY_64_TSS);
 	}
 
-	vcpu->shadow_efer |= EFER_LMA;
+	vcpu->arch.shadow_efer |= EFER_LMA;
 
 	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
 	vmcs_write32(VM_ENTRY_CONTROLS,
@@ -1185,7 +1183,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-	vcpu->shadow_efer &= ~EFER_LMA;
+	vcpu->arch.shadow_efer &= ~EFER_LMA;
 
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
@@ -1196,22 +1194,22 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
-	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
+	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
 }
 
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	vmx_fpu_deactivate(vcpu);
 
-	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
+	if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
-	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
+	if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->shadow_efer & EFER_LME) {
+	if (vcpu->arch.shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
 			enter_lmode(vcpu);
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
@@ -1222,7 +1220,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0,
 		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
-	vcpu->cr0 = cr0;
+	vcpu->arch.cr0 = cr0;
 
 	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
 		vmx_fpu_activate(vcpu);
@@ -1231,16 +1229,16 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	vmcs_writel(GUEST_CR3, cr3);
-	if (vcpu->cr0 & X86_CR0_PE)
+	if (vcpu->arch.cr0 & X86_CR0_PE)
 		vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	vmcs_writel(CR4_READ_SHADOW, cr4);
-	vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
+	vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
-	vcpu->cr4 = cr4;
+	vcpu->arch.cr4 = cr4;
 }
 
 #ifdef CONFIG_X86_64
@@ -1250,7 +1248,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
-	vcpu->shadow_efer = efer;
+	vcpu->arch.shadow_efer = efer;
 	if (efer & EFER_LMA) {
 		vmcs_write32(VM_ENTRY_CONTROLS,
 				     vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1327,17 +1325,17 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 	u32 ar;
 
-	if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
-		vcpu->rmode.tr.selector = var->selector;
-		vcpu->rmode.tr.base = var->base;
-		vcpu->rmode.tr.limit = var->limit;
-		vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+	if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
+		vcpu->arch.rmode.tr.selector = var->selector;
+		vcpu->arch.rmode.tr.base = var->base;
+		vcpu->arch.rmode.tr.limit = var->limit;
+		vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
 		return;
 	}
 	vmcs_writel(sf->base, var->base);
 	vmcs_write32(sf->limit, var->limit);
 	vmcs_write16(sf->selector, var->selector);
-	if (vcpu->rmode.active && var->s) {
+	if (vcpu->arch.rmode.active && var->s) {
 		/*
 		 * Hack real-mode segments into vm86 compatibility.
 		 */
@@ -1438,9 +1436,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		goto out;
 	}
 
-	vmx->vcpu.rmode.active = 0;
+	vmx->vcpu.arch.rmode.active = 0;
 
-	vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	set_cr8(&vmx->vcpu, 0);
 	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (vmx->vcpu.vcpu_id == 0)
@@ -1601,15 +1599,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
 	if (vm_need_tpr_shadow(vmx->vcpu.kvm))
 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
-			     page_to_phys(vmx->vcpu.apic->regs_page));
+			     page_to_phys(vmx->vcpu.arch.apic->regs_page));
 	vmcs_write32(TPR_THRESHOLD, 0);
 #endif
 
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-	vmx->vcpu.cr0 = 0x60000010;
-	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
+	vmx->vcpu.arch.cr0 = 0x60000010;
+	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
 	vmx_set_efer(&vmx->vcpu, 0);
@@ -1680,7 +1678,7 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
-	if (vcpu->rmode.active) {
+	if (vcpu->arch.rmode.active) {
 		inject_rmode_irq(vcpu, irq);
 		return;
 	}
@@ -1751,7 +1749,7 @@ static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 				  int vec, u32 err_code)
 {
-	if (!vcpu->rmode.active)
+	if (!vcpu->arch.rmode.active)
 		return 0;
 
 	/*
@@ -1786,8 +1784,10 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
 	}
 
-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
-		return 1;  /* already handled by vmx_vcpu_run() */
+	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
+		asm("int $2");
+		return 1;
+	}
 
 	if (is_no_device(intr_info)) {
 		vmx_fpu_activate(vcpu);
@@ -1837,7 +1837,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 	}
 
-	if (vcpu->rmode.active &&
+	if (vcpu->arch.rmode.active &&
 	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
 								error_code)) {
 		if (vcpu->halt_request) {
@@ -1922,22 +1922,22 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		switch (cr) {
 		case 0:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr0(vcpu, vcpu->regs[reg]);
+			set_cr0(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 3:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr3(vcpu, vcpu->regs[reg]);
+			set_cr3(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 4:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr4(vcpu, vcpu->regs[reg]);
+			set_cr4(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr8(vcpu, vcpu->regs[reg]);
+			set_cr8(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			kvm_run->exit_reason = KVM_EXIT_SET_TPR;
 			return 0;
@@ -1946,8 +1946,8 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	case 2: /* clts */
 		vcpu_load_rsp_rip(vcpu);
 		vmx_fpu_deactivate(vcpu);
-		vcpu->cr0 &= ~X86_CR0_TS;
-		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+		vcpu->arch.cr0 &= ~X86_CR0_TS;
+		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
 		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
 		return 1;
@@ -1955,13 +1955,13 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		switch (cr) {
 		case 3:
 			vcpu_load_rsp_rip(vcpu);
-			vcpu->regs[reg] = vcpu->cr3;
+			vcpu->arch.regs[reg] = vcpu->arch.cr3;
 			vcpu_put_rsp_rip(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			vcpu->regs[reg] = get_cr8(vcpu);
+			vcpu->arch.regs[reg] = get_cr8(vcpu);
 			vcpu_put_rsp_rip(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
@@ -2007,7 +2007,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		default:
 			val = 0;
 		}
-		vcpu->regs[reg] = val;
+		vcpu->arch.regs[reg] = val;
 	} else {
 		/* mov to dr */
 	}
@@ -2024,7 +2024,7 @@ static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
 	u64 data;
 
 	if (vmx_get_msr(vcpu, ecx, &data)) {
@@ -2033,17 +2033,17 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	/* FIXME: handling of bits 32:63 of rax, rdx */
-	vcpu->regs[VCPU_REGS_RAX] = data & -1u;
-	vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
+	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
+	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
-	u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
-		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
+		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
 		vmx_inject_gp(vcpu, 0);
@@ -2333,24 +2333,24 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	      : "=q" (vmx->fail)
 	      : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
 		"c"(vcpu),
-		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
-		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
-		[rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
-		[rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
-		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
-		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
-		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
+		[rax]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_RAX])),
+		[rbx]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_RBX])),
+		[rcx]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_RCX])),
+		[rdx]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_RDX])),
+		[rsi]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_RSI])),
+		[rdi]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_RDI])),
+		[rbp]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_RBP])),
 #ifdef CONFIG_X86_64
-		[r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
-		[r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
-		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
-		[r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
-		[r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
-		[r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
-		[r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
-		[r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
+		[r8]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R8])),
+		[r9]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R9])),
+		[r10]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R10])),
+		[r11]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R11])),
+		[r12]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R12])),
+		[r13]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R13])),
+		[r14]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R14])),
+		[r15]"i"(offsetof(struct kvm_vcpu, arch.regs[VCPU_REGS_R15])),
 #endif
-		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
+		[cr2]"i"(offsetof(struct kvm_vcpu, arch.cr2))
 	      : "cc", "memory");
 
 	vcpu->interrupt_window_open =
@@ -2386,7 +2386,7 @@ static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
 			     INTR_INFO_VALID_MASK);
 		return;
 	}
-	vcpu->cr2 = addr;
+	vcpu->arch.cr2 = addr;
 	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 		     PF_VECTOR |
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 1fe209d..91931d0 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -15,7 +15,6 @@
  */
 
 #include "kvm.h"
-#include "x86.h"
 #include "irq.h"
 
 #include <linux/kvm.h>
@@ -187,8 +186,8 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
 
 	rdmsrl(MSR_EFER, efer);
 	entry = NULL;
-	for (i = 0; i < vcpu->cpuid_nent; ++i) {
-		e = &vcpu->cpuid_entries[i];
+	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+		e = &vcpu->arch.cpuid_entries[i];
 		if (e->function == 0x80000001) {
 			entry = e;
 			break;
@@ -210,10 +209,10 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		goto out;
 	r = -EFAULT;
-	if (copy_from_user(&vcpu->cpuid_entries, entries,
+	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
 			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
 		goto out;
-	vcpu->cpuid_nent = cpuid->nent;
+	vcpu->arch.cpuid_nent = cpuid->nent;
 	cpuid_fix_nx_cap(vcpu);
 	return 0;
 
@@ -225,7 +224,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
 	vcpu_load(vcpu);
-	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
+	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
 	vcpu_put(vcpu);
 
 	return 0;
@@ -235,7 +234,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
 	vcpu_load(vcpu);
-	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
+	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
 	kvm_apic_post_state_restore(vcpu);
 	vcpu_put(vcpu);
 
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 1e2f71b..5d5b82c 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -1,16 +1,60 @@
-#/*
- * Kernel-based Virtual Machine driver for Linux
- *
- * This header defines architecture specific interfaces, x86 version
- *
+#ifndef __KVM_X86_H
+#define __KVM_X86_H
+
+/*
  * This work is licensed under the terms of the GNU GPL, version 2. See
  * the COPYING file in the top-level directory.
- *
  */
 
-#ifndef KVM_X86_H
-#define KVM_X86_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+
+struct kvm_arch_vcpu {
+	u64 host_tsc;
+	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
+	unsigned long rip;      /* needs vcpu_load_rsp_rip() */
+
+	unsigned long cr0;
+	unsigned long cr2;
+	unsigned long cr3;
+	unsigned long cr4;
+	unsigned long cr8;
+	u64 pdptrs[4]; /* pae */
+	u64 shadow_efer;
+	u64 apic_base;
+	struct kvm_lapic *apic;    /* kernel irqchip context */
+
+	u64 ia32_misc_enable_msr;
+	gva_t mmio_fault_cr2;
+
+	struct i387_fxsave_struct host_fx_image;
+	struct i387_fxsave_struct guest_fx_image;
+	int fpu_active;
+	int guest_fpu_loaded;
+
+	struct {
+		int active;
+		u8 save_iopl;
+		struct kvm_save_segment {
+			u16 selector;
+			unsigned long base;
+			u32 limit;
+			u32 ar;
+		} tr, es, ds, fs, gs;
+	} rmode;
+
+	int cpuid_nent;
+	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+	/* emulate context */
 
-#include "kvm.h"
+	struct x86_emulate_ctxt emulate_ctxt;
+};
 
 #endif
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index fa33fcd..8ee4f67 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -527,8 +527,8 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	/* Shadow copy of register state. Committed on successful emulation. */
 
 	memset(c, 0, sizeof(struct decode_cache));
-	c->eip = ctxt->vcpu->rip;
-	memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs);
+	c->eip = ctxt->vcpu->arch.rip;
+	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL:
@@ -1153,7 +1153,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	 * modify them.
 	 */
 
-	memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs);
+	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
 	saved_eip = c->eip;
 
 	if ((c->d & ModRM) && (c->modrm_mod != 3))
@@ -1357,8 +1357,8 @@ writeback:
 		goto done;
 
 	/* Commit shadow register state. */
-	memcpy(ctxt->vcpu->regs, c->regs, sizeof c->regs);
-	ctxt->vcpu->rip = c->eip;
+	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
+	ctxt->vcpu->arch.rip = c->eip;
 
 done:
 	if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1453,11 +1453,11 @@ special_insn:
 	}
 	if (c->rep_prefix) {
 		if (c->regs[VCPU_REGS_RCX] == 0) {
-			ctxt->vcpu->rip = c->eip;
+			ctxt->vcpu->arch.rip = c->eip;
 			goto done;
 		}
 		c->regs[VCPU_REGS_RCX]--;
-		c->eip = ctxt->vcpu->rip;
+		c->eip = ctxt->vcpu->arch.rip;
 	}
 	switch (c->b) {
 	case 0xa4 ... 0xa5:	/* movs */
@@ -1718,7 +1718,7 @@ twobyte_special_insn:
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
 			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->rip;
+			c->eip = ctxt->vcpu->arch.rip;
 		}
 		rc = X86EMUL_CONTINUE;
 		break;
@@ -1727,7 +1727,7 @@ twobyte_special_insn:
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
 			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->rip;
+			c->eip = ctxt->vcpu->arch.rip;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
 			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
-- 
1.5.1.2
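
Two notes on the mechanics of the split, with hypothetical sketches
that are not part of the patch; only the struct and field names match
the kvm_arch_vcpu definition added to x86.h above.

Common code continues to pass struct kvm_vcpu around unchanged; all
x86-specific state is now one indirection away through the embedded
arch member. A minimal sketch of the resulting access pattern:

	/* Hypothetical helper, for illustration only. */
	static inline int vcpu_in_real_mode(struct kvm_vcpu *vcpu)
	{
		/* arch.rmode.active replaces the old vcpu->rmode.active */
		return vcpu->arch.rmode.active;
	}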

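The vmx_vcpu_run() hunk works because offsetof() composes through
nested members and array elements, so only the operand expressions in
the inline asm needed updating. A standalone sketch of that property,
using simplified stand-in types rather than the kernel structures:

	#include <stddef.h>
	#include <stdio.h>

	struct arch_state { unsigned long regs[16]; unsigned long cr2; };
	struct vcpu { void *kvm; struct arch_state arch; };

	int main(void)
	{
		/* offsetof(type, a.b[i]) reaches into the embedded struct,
		 * mirroring arch.regs[VCPU_REGS_RAX] in the asm operands. */
		printf("%zu\n", offsetof(struct vcpu, arch.regs[3]));
		printf("%zu\n", offsetof(struct vcpu, arch.cr2));
		return 0;
	}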