public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/4] vmx: pass vcpu_vmx internally
@ 2007-07-30  6:31 Rusty Russell
       [not found] ` <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
  0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30  6:31 UTC (permalink / raw)
  To: kvm-devel

container_of is wonderful, but not casting at all is better.  This
patch changes vmx.c's internal functions to pass "struct vcpu_vmx"
instead of "struct kvm_vcpu", avoiding the need for container_of.

Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>

diff -r 45a921ab14a2 drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c	Fri Jul 27 17:38:38 2007 +1000
+++ b/drivers/kvm/vmx.c	Fri Jul 27 17:44:34 2007 +1000
@@ -141,9 +141,8 @@ static inline u64 msr_efer_save_restore_
 	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
 }
 
-static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
+{
 	int efer_offset = vmx->msr_offset_efer;
 	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
 		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
@@ -169,9 +168,8 @@ static inline int is_external_interrupt(
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
+{
 	int i;
 
 	for (i = 0; i < vmx->nmsrs; ++i)
@@ -180,12 +178,11 @@ static int __find_msr_index(struct kvm_v
 	return -1;
 }
 
-static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+{
 	int i;
 
-	i = __find_msr_index(vcpu, msr);
+	i = __find_msr_index(vmx, msr);
 	if (i >= 0)
 		return &vmx->guest_msrs[i];
 	return NULL;
@@ -206,24 +203,24 @@ static void vmcs_clear(struct vmcs *vmcs
 
 static void __vcpu_clear(void *arg)
 {
-	struct kvm_vcpu *vcpu = arg;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vcpu_vmx *vmx = arg;
 	int cpu = raw_smp_processor_id();
 
-	if (vcpu->cpu == cpu)
+	if (vmx->vcpu.cpu == cpu)
 		vmcs_clear(vmx->vmcs);
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
-	rdtscll(vcpu->host_tsc);
-}
-
-static void vcpu_clear(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
-		smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
+	rdtscll(vmx->vcpu.host_tsc);
+}
+
+static void vcpu_clear(struct vcpu_vmx *vmx)
+{
+	if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
+		smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
+					 vmx, 0, 1);
 	else
-		__vcpu_clear(vcpu);
-	to_vmx(vcpu)->launched = 0;
+		__vcpu_clear(vmx);
+	vmx->launched = 0;
 }
 
 static unsigned long vmcs_readl(unsigned long field)
@@ -333,23 +330,20 @@ static void reload_tss(void)
 #endif
 }
 
-static void load_transition_efer(struct kvm_vcpu *vcpu)
+static void load_transition_efer(struct vcpu_vmx *vmx)
 {
 	u64 trans_efer;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int efer_offset = vmx->msr_offset_efer;
 
 	trans_efer = vmx->host_msrs[efer_offset].data;
 	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
 	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
 	wrmsrl(MSR_EFER, trans_efer);
-	vcpu->stat.efer_reload++;
-}
-
-static void vmx_save_host_state(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
+	vmx->vcpu.stat.efer_reload++;
+}
+
+static void vmx_save_host_state(struct vcpu_vmx *vmx)
+{
 	if (vmx->host_state.loaded)
 		return;
 
@@ -384,20 +378,18 @@ static void vmx_save_host_state(struct k
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
+	if (is_long_mode(&vmx->vcpu)) {
 		save_msrs(vmx->host_msrs +
 			  vmx->msr_offset_kernel_gs_base, 1);
 	}
 #endif
 	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vcpu))
-		load_transition_efer(vcpu);
-}
-
-static void vmx_load_host_state(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
+	if (msr_efer_need_save_restore(vmx))
+		load_transition_efer(vmx);
+}
+
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
+{
 	if (!vmx->host_state.loaded)
 		return;
 
@@ -420,7 +412,7 @@ static void vmx_load_host_state(struct k
 	}
 	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vcpu))
+	if (msr_efer_need_save_restore(vmx))
 		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
 }
 
@@ -438,7 +430,7 @@ static void vmx_vcpu_load(struct kvm_vcp
 	cpu = get_cpu();
 
 	if (vcpu->cpu != cpu)
-		vcpu_clear(vcpu);
+		vcpu_clear(vmx);
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
 		u8 error;
@@ -479,7 +471,7 @@ static void vmx_vcpu_load(struct kvm_vcp
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	vmx_load_host_state(vcpu);
+	vmx_load_host_state(to_vmx(vcpu));
 	kvm_put_guest_fpu(vcpu);
 	put_cpu();
 }
@@ -506,7 +498,7 @@ static void vmx_fpu_deactivate(struct kv
 
 static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
 {
-	vcpu_clear(vcpu);
+	vcpu_clear(to_vmx(vcpu));
 }
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -554,9 +546,8 @@ static void vmx_inject_gp(struct kvm_vcp
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
-void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+{
 	struct kvm_msr_entry tmp;
 
 	tmp = vmx->guest_msrs[to];
@@ -572,44 +563,43 @@ void move_msr_up(struct kvm_vcpu *vcpu, 
  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
  * mode, as fiddling with msrs is very expensive.
  */
-static void setup_msrs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+static void setup_msrs(struct vcpu_vmx *vmx)
+{
 	int save_nmsrs;
 
 	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
+	if (is_long_mode(&vmx->vcpu)) {
 		int index;
 
-		index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_LSTAR);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_LSTAR);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_CSTAR);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_CSTAR);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
+			move_msr_up(vmx, index, save_nmsrs++);
 		/*
 		 * MSR_K6_STAR is only needed on long mode guests, and only
 		 * if efer.sce is enabled.
 		 */
-		index = __find_msr_index(vcpu, MSR_K6_STAR);
-		if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
-			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_K6_STAR);
+		if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
 	vmx->save_nmsrs = save_nmsrs;
 
 #ifdef CONFIG_X86_64
 	vmx->msr_offset_kernel_gs_base =
-		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 #endif
-	vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
 }
 
 /*
@@ -676,7 +666,7 @@ static int vmx_get_msr(struct kvm_vcpu *
 		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	default:
-		msr = find_msr_entry(vcpu, msr_index);
+		msr = find_msr_entry(to_vmx(vcpu), msr_index);
 		if (msr) {
 			data = msr->data;
 			break;
@@ -704,7 +694,7 @@ static int vmx_set_msr(struct kvm_vcpu *
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
 		if (vmx->host_state.loaded)
-			load_transition_efer(vcpu);
+			load_transition_efer(vmx);
 		break;
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
@@ -726,7 +716,7 @@ static int vmx_set_msr(struct kvm_vcpu *
 		guest_write_tsc(data);
 		break;
 	default:
-		msr = find_msr_entry(vcpu, msr_index);
+		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
 			msr->data = data;
 			if (vmx->host_state.loaded)
@@ -1038,7 +1028,7 @@ static void enter_lmode(struct kvm_vcpu 
 
 	vcpu->shadow_efer |= EFER_LMA;
 
-	find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
+	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
 		     | VM_ENTRY_CONTROLS_IA32E_MASK);
@@ -1108,7 +1098,8 @@ static void vmx_set_cr4(struct kvm_vcpu 
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
 	vcpu->shadow_efer = efer;
 	if (efer & EFER_LMA) {
@@ -1124,7 +1115,7 @@ static void vmx_set_efer(struct kvm_vcpu
 
 		msr->data = efer & ~EFER_LME;
 	}
-	setup_msrs(vcpu);
+	setup_msrs(vmx);
 }
 
 #endif
@@ -1297,9 +1288,9 @@ static void seg_setup(int seg)
 /*
  * Sets up the vmcs for emulated real mode.
  */
-static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+{
+	
 	u32 host_sysenter_cs;
 	u32 junk;
 	unsigned long a;
@@ -1308,19 +1299,18 @@ static int vmx_vcpu_setup(struct kvm_vcp
 	int ret = 0;
 	unsigned long kvm_vmx_return;
 
-	if (!init_rmode_tss(vcpu->kvm)) {
+	if (!init_rmode_tss(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	memset(vcpu->regs, 0, sizeof(vcpu->regs));
-	vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
-	vcpu->cr8 = 0;
-	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
-	if (vcpu->vcpu_id == 0)
-		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
-
-	fx_init(vcpu);
+	vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+	vmx->vcpu.cr8 = 0;
+	vmx->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	if (vmx->vcpu.vcpu_id == 0)
+		vmx->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+
+	fx_init(&vmx->vcpu);
 
 	/*
 	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
@@ -1455,7 +1445,7 @@ static int vmx_vcpu_setup(struct kvm_vcp
 		++vmx->nmsrs;
 	}
 
-	setup_msrs(vcpu);
+	setup_msrs(vmx);
 
 	vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
 		     	       (HOST_IS_64 << 9));  /* 22.2,1, 20.7.1 */
@@ -1473,14 +1463,14 @@ static int vmx_vcpu_setup(struct kvm_vcp
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-	vcpu->cr0 = 0x60000010;
-	vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
-	vmx_set_cr4(vcpu, 0);
+	vmx->vcpu.cr0 = 0x60000010;
+	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+	vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
-	vmx_set_efer(vcpu, 0);
+	vmx_set_efer(&vmx->vcpu, 0);
 #endif
-	vmx_fpu_activate(vcpu);
-	update_exception_bitmap(vcpu);
+	vmx_fpu_activate(&vmx->vcpu);
+	update_exception_bitmap(&vmx->vcpu);
 
 	return 0;
 
@@ -2071,7 +2061,7 @@ again:
 	if (!vcpu->mmio_read_completed)
 		do_interrupt_requests(vcpu, kvm_run);
 
-	vmx_save_host_state(vcpu);
+	vmx_save_host_state(vmx);
 	kvm_load_guest_fpu(vcpu);
 
 	/*
@@ -2292,7 +2282,7 @@ static void vmx_free_vmcs(struct kvm_vcp
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (vmx->vmcs) {
-		on_each_cpu(__vcpu_clear, vcpu, 0, 1);
+		on_each_cpu(__vcpu_clear, vmx, 0, 1);
 		free_vmcs(vmx->vmcs);
 		vmx->vmcs = NULL;
 	}
@@ -2338,7 +2328,7 @@ static struct kvm_vcpu *vmx_create_vcpu(
 	vmcs_clear(vmx->vmcs);
 
 	vmx_vcpu_load(&vmx->vcpu);
-	err = vmx_vcpu_setup(&vmx->vcpu);
+	err = vmx_vcpu_setup(vmx);
 	vmx_vcpu_put(&vmx->vcpu);
 	if (err)
 		goto free_vmcs;



-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >>  http://get.splunk.com/

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2007-07-30 13:09 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-07-30  6:31 [PATCH 1/4] vmx: pass vcpu_vmx internally Rusty Russell
     [not found] ` <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  6:32   ` [PATCH 2/4] svm: pass vcpu_svm internally Rusty Russell
     [not found]     ` <1185777179.12151.149.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  6:36       ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
     [not found]         ` <1185777368.12151.152.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  6:39           ` [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops Rusty Russell
     [not found]             ` <1185777542.12151.156.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  9:12               ` Avi Kivity
     [not found]                 ` <46ADAB67.5070805-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 10:04                   ` Rusty Russell
     [not found]                     ` <1185789882.6131.30.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 10:26                       ` Avi Kivity
     [not found]                         ` <46ADBCBC.9050207-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 11:12                           ` [PATCH 1/2] Use kmem cache for allocating vcpus Rusty Russell
     [not found]                             ` <1185793939.6131.38.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 11:13                               ` [PATCH 2/2] Use alignment properties of vcpu to simplify FPU ops Rusty Russell
     [not found]                                 ` <1185794023.6131.41.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 13:09                                   ` Avi Kivity
2007-07-30  6:39           ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
2007-07-30  9:03       ` [PATCH 2/4] svm: pass vcpu_svm internally Avi Kivity
     [not found]         ` <46ADA971.9030406-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 10:07           ` [PATCH 1/2] svm de-containization Rusty Russell
     [not found]             ` <1185790028.6131.32.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 10:08               ` [PATCH 2/2] svm internal function name cleanup Rusty Russell
2007-07-30 10:22               ` [PATCH 1/2] svm de-containization Avi Kivity
2007-07-30  9:02   ` [PATCH 1/4] vmx: pass vcpu_vmx internally Avi Kivity

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox