public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
To: kvm-devel <kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org>
Subject: [PATCH 2/4] svm: pass vcpu_svm internally
Date: Mon, 30 Jul 2007 16:32:58 +1000	[thread overview]
Message-ID: <1185777179.12151.149.camel@localhost.localdomain> (raw)
In-Reply-To: <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>

container_of is wonderful, but not casting at all is better.  This
patch changes svm.c's internal functions to pass "struct vcpu_svm"
instead of "struct kvm_vcpu" and using container_of.

It also changes some internal function names:
1) io_adress -> io_address  (de-germanify the spelling)
2) kvm_reput_irq -> reput_irq  (it's not a generic kvm function)
3) kvm_do_inject_irq -> inject_irq  (it's not a generic kvm function)

Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>

diff -r 107739271dfc drivers/kvm/svm.c
--- a/drivers/kvm/svm.c	Fri Jul 27 17:44:47 2007 +1000
+++ b/drivers/kvm/svm.c	Fri Jul 27 18:06:44 2007 +1000
@@ -98,9 +98,9 @@ static inline u32 svm_has(u32 feat)
 	return svm_features & feat;
 }
 
-static unsigned get_addr_size(struct kvm_vcpu *vcpu)
-{
-	struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
+static unsigned get_addr_size(struct vcpu_svm *svm)
+{
+	struct vmcb_save_area *sa = &svm->vmcb->save;
 	u16 cs_attrib;
 
 	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
@@ -866,17 +866,15 @@ static void save_host_msrs(struct kvm_vc
 #endif
 }
 
-static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+{
 	if (svm_data->next_asid > svm_data->max_asid) {
 		++svm_data->asid_generation;
 		svm_data->next_asid = 1;
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 
-	vcpu->cpu = svm_data->cpu;
+	svm->vcpu.cpu = svm_data->cpu;
 	svm->asid_generation = svm_data->asid_generation;
 	svm->vmcb->control.asid = svm_data->next_asid++;
 }
@@ -930,42 +928,43 @@ static void svm_set_dr(struct kvm_vcpu *
 	}
 }
 
-static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
+static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	u32 exit_int_info = svm->vmcb->control.exit_int_info;
+	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
 	enum emulation_result er;
 	int r;
 
 	if (is_external_interrupt(exit_int_info))
-		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
-
-	spin_lock(&vcpu->kvm->lock);
+		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+
+	spin_lock(&kvm->lock);
 
 	fault_address  = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
-	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+	r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 	if (r < 0) {
-		spin_unlock(&vcpu->kvm->lock);
+		spin_unlock(&kvm->lock);
 		return r;
 	}
 	if (!r) {
-		spin_unlock(&vcpu->kvm->lock);
+		spin_unlock(&kvm->lock);
 		return 1;
 	}
-	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
-	spin_unlock(&vcpu->kvm->lock);
+	er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
+				 error_code);
+	spin_unlock(&kvm->lock);
 
 	switch (er) {
 	case EMULATE_DONE:
 		return 1;
 	case EMULATE_DO_MMIO:
-		++vcpu->stat.mmio_exits;
+		++svm->vcpu.stat.mmio_exits;
 		return 0;
 	case EMULATE_FAIL:
-		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+		vcpu_printf(&svm->vcpu, "%s: emulate fail\n", __FUNCTION__);
 		break;
 	default:
 		BUG();
@@ -975,21 +974,18 @@ static int pf_interception(struct kvm_vc
 	return 0;
 }
 
-static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
+static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(vcpu->cr0 & X86_CR0_TS))
+	if (!(svm->vcpu.cr0 & X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
-	vcpu->fpu_active = 1;
+	svm->vcpu.fpu_active = 1;
 
 	return 1;
 }
 
-static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
+static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	/*
 	 * VMCB is undefined after a SHUTDOWN intercept
 	 * so reinitialize it.
@@ -1001,11 +997,10 @@ static int shutdown_interception(struct 
 	return 0;
 }
 
-static int io_get_override(struct kvm_vcpu *vcpu,
+static int io_get_override(struct vcpu_svm *svm,
 			  struct vmcb_seg **seg,
 			  int *addr_override)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	u8 inst[MAX_INST_SIZE];
 	unsigned ins_length;
 	gva_t rip;
@@ -1024,7 +1019,7 @@ static int io_get_override(struct kvm_vc
 		       svm->vmcb->control.exit_info_2,
 		       ins_length);
 
-	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
+	if (kvm_read_guest(&svm->vcpu, rip, ins_length, inst) != ins_length)
 		/* #PF */
 		return 0;
 
@@ -1065,28 +1060,27 @@ static int io_get_override(struct kvm_vc
 	return 0;
 }
 
-static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
+static unsigned long io_address(struct vcpu_svm *svm, int ins, gva_t *address)
 {
 	unsigned long addr_mask;
 	unsigned long *reg;
 	struct vmcb_seg *seg;
 	int addr_override;
-	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_save_area *save_area = &svm->vmcb->save;
 	u16 cs_attrib = save_area->cs.attrib;
-	unsigned addr_size = get_addr_size(vcpu);
-
-	if (!io_get_override(vcpu, &seg, &addr_override))
+	unsigned addr_size = get_addr_size(svm);
+
+	if (!io_get_override(svm, &seg, &addr_override))
 		return 0;
 
 	if (addr_override)
 		addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
 
 	if (ins) {
-		reg = &vcpu->regs[VCPU_REGS_RDI];
+		reg = &svm->vcpu.regs[VCPU_REGS_RDI];
 		seg = &svm->vmcb->save.es;
 	} else {
-		reg = &vcpu->regs[VCPU_REGS_RSI];
+		reg = &svm->vcpu.regs[VCPU_REGS_RSI];
 		seg = (seg) ? seg : &svm->vmcb->save.ds;
 	}
 
@@ -1099,7 +1093,7 @@ static unsigned long io_adress(struct kv
 	}
 
 	if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
-		svm_inject_gp(vcpu, 0);
+		svm_inject_gp(&svm->vcpu, 0);
 		return 0;
 	}
 
@@ -1107,16 +1101,15 @@ static unsigned long io_adress(struct kv
 	return addr_mask;
 }
 
-static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
+static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
 	int size, down, in, string, rep;
 	unsigned port;
 	unsigned long count;
 	gva_t address = 0;
 
-	++vcpu->stat.io_exits;
+	++svm->vcpu.stat.io_exits;
 
 	svm->next_rip = svm->vmcb->control.exit_info_2;
 
@@ -1131,69 +1124,66 @@ static int io_interception(struct kvm_vc
 	if (string) {
 		unsigned addr_mask;
 
-		addr_mask = io_adress(vcpu, in, &address);
+		addr_mask = io_address(svm, in, &address);
 		if (!addr_mask) {
 			printk(KERN_DEBUG "%s: get io address failed\n",
 			       __FUNCTION__);
 			return 1;
 		}
 
 		if (rep)
-			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
-	}
-	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
-			     address, rep, port);
-}
-
-static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+			count = svm->vcpu.regs[VCPU_REGS_RCX] & addr_mask;
+	}
+	return kvm_setup_pio(&svm->vcpu, kvm_run, in, size, count, string,
+			     down, address, rep, port);
+}
+
+static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	return 1;
 }
 
-static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
+static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	svm->next_rip = svm->vmcb->save.rip + 1;
-	skip_emulated_instruction(vcpu);
-	return kvm_emulate_halt(vcpu);
-}
-
-static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
+	skip_emulated_instruction(&svm->vcpu);
+	return kvm_emulate_halt(&svm->vcpu);
+}
+
+static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	svm->next_rip = svm->vmcb->save.rip + 3;
-	skip_emulated_instruction(vcpu);
-	return kvm_hypercall(vcpu, kvm_run);
-}
-
-static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	inject_ud(vcpu);
+	skip_emulated_instruction(&svm->vcpu);
+	return kvm_hypercall(&svm->vcpu, kvm_run);
+}
+
+static int invalid_op_interception(struct vcpu_svm *svm,
+				   struct kvm_run *kvm_run)
+{
+	inject_ud(&svm->vcpu);
 	return 1;
 }
 
-static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
+static int task_switch_interception(struct vcpu_svm *svm,
+				    struct kvm_run *kvm_run)
+{
+	printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
 	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
 	return 0;
 }
 
-static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
+static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	svm->next_rip = svm->vmcb->save.rip + 2;
-	kvm_emulate_cpuid(vcpu);
+	kvm_emulate_cpuid(&svm->vcpu);
 	return 1;
 }
 
-static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
-		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
+static int emulate_on_interception(struct vcpu_svm *svm,
+				   struct kvm_run *kvm_run)
+{
+	if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
+		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
 	return 1;
 }
 
@@ -1240,19 +1230,18 @@ static int svm_get_msr(struct kvm_vcpu *
 	return 0;
 }
 
-static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
 	u64 data;
 
-	if (svm_get_msr(vcpu, ecx, &data))
-		svm_inject_gp(vcpu, 0);
+	if (svm_get_msr(&svm->vcpu, ecx, &data))
+		svm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
-		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
+		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
-		skip_emulated_instruction(vcpu);
+		skip_emulated_instruction(&svm->vcpu);
 	}
 	return 1;
 }
@@ -1301,29 +1290,28 @@ static int svm_set_msr(struct kvm_vcpu *
 	return 0;
 }
 
-static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
-		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
-	if (svm_set_msr(vcpu, ecx, data))
-		svm_inject_gp(vcpu, 0);
+	if (svm_set_msr(&svm->vcpu, ecx, data))
+		svm_inject_gp(&svm->vcpu, 0);
 	else
-		skip_emulated_instruction(vcpu);
+		skip_emulated_instruction(&svm->vcpu);
 	return 1;
 }
 
-static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	if (to_svm(vcpu)->vmcb->control.exit_info_1)
-		return wrmsr_interception(vcpu, kvm_run);
+static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (svm->vmcb->control.exit_info_1)
+		return wrmsr_interception(svm, kvm_run);
 	else
-		return rdmsr_interception(vcpu, kvm_run);
-}
-
-static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+		return rdmsr_interception(svm, kvm_run);
+}
+
+static int interrupt_window_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
 	/*
@@ -1331,8 +1319,8 @@ static int interrupt_window_interception
 	 * possible
 	 */
 	if (kvm_run->request_interrupt_window &&
-	    !vcpu->irq_summary) {
-		++vcpu->stat.irq_window_exits;
+	    !svm->vcpu.irq_summary) {
+		++svm->vcpu.stat.irq_window_exits;
 		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		return 0;
 	}
@@ -1340,7 +1328,7 @@ static int interrupt_window_interception
 	return 1;
 }
 
-static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
+static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 				      struct kvm_run *kvm_run) = {
 	[SVM_EXIT_READ_CR0]           		= emulate_on_interception,
 	[SVM_EXIT_READ_CR3]           		= emulate_on_interception,
@@ -1387,9 +1375,8 @@ static int (*svm_exit_handlers[])(struct
 };
 
 
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
+static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
 	u32 exit_code = svm->vmcb->control.exit_code;
 
 	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
@@ -1406,7 +1393,7 @@ static int handle_exit(struct kvm_vcpu *
 		return 0;
 	}
 
-	return svm_exit_handlers[exit_code](vcpu, kvm_run);
+	return svm_exit_handlers[exit_code](svm, kvm_run);
 }
 
 static void reload_tss(struct kvm_vcpu *vcpu)
@@ -1418,80 +1405,77 @@ static void reload_tss(struct kvm_vcpu *
 	load_TR_desc();
 }
 
-static void pre_svm_run(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
+static void pre_svm_run(struct vcpu_svm *svm)
+{
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
-	if (vcpu->cpu != cpu ||
+	if (svm->vcpu.cpu != cpu ||
 	    svm->asid_generation != svm_data->asid_generation)
-		new_asid(vcpu, svm_data);
-}
-
-
-static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
+		new_asid(svm, svm_data);
+}
+
+
+static inline void inject_irq(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control;
 
-	control = &to_svm(vcpu)->vmcb->control;
-	control->int_vector = pop_irq(vcpu);
+	control = &svm->vmcb->control;
+	control->int_vector = pop_irq(&svm->vcpu);
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void kvm_reput_irq(struct kvm_vcpu *vcpu)
-{
-	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+static void reput_irq(struct vcpu_svm *svm)
+{
+	struct vmcb_control_area *control = &svm->vmcb->control;
 
 	if (control->int_ctl & V_IRQ_MASK) {
 		control->int_ctl &= ~V_IRQ_MASK;
-		push_irq(vcpu, control->int_vector);
-	}
-
-	vcpu->interrupt_window_open =
+		push_irq(&svm->vcpu, control->int_vector);
+	}
+
+	svm->vcpu.interrupt_window_open =
 		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+static void do_interrupt_requests(struct vcpu_svm *svm,
 				       struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 
-	vcpu->interrupt_window_open =
+	svm->vcpu.interrupt_window_open =
 		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 
-	if (vcpu->interrupt_window_open && vcpu->irq_summary)
+	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
 		/*
 		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
 		 */
-		kvm_do_inject_irq(vcpu);
+		inject_irq(svm);
 
 	/*
 	 * Interrupts blocked.  Wait for unblock.
 	 */
-	if (!vcpu->interrupt_window_open &&
-	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+	if (!svm->vcpu.interrupt_window_open &&
+	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
 		control->intercept |= 1ULL << INTERCEPT_VINTR;
 	} else
 		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
 }
 
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+static void post_kvm_run_save(struct vcpu_svm *svm,
 			      struct kvm_run *kvm_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
-						  vcpu->irq_summary == 0);
+	kvm_run->ready_for_interrupt_injection
+		= (svm->vcpu.interrupt_window_open &&
+		   svm->vcpu.irq_summary == 0);
 	kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
-	kvm_run->cr8 = vcpu->cr8;
-	kvm_run->apic_base = vcpu->apic_base;
+	kvm_run->cr8 = svm->vcpu.cr8;
+	kvm_run->apic_base = svm->vcpu.apic_base;
 }
 
 /*
@@ -1500,13 +1484,13 @@ static void post_kvm_run_save(struct kvm
  *
  * No need to exit to userspace if we already have an interrupt queued.
  */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+static int dm_request_for_irq_injection(struct vcpu_svm *svm,
 					  struct kvm_run *kvm_run)
 {
-	return (!vcpu->irq_summary &&
+	return (!svm->vcpu.irq_summary &&
 		kvm_run->request_interrupt_window &&
-		vcpu->interrupt_window_open &&
-		(to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
+		svm->vcpu.interrupt_window_open &&
+		(svm->vmcb->save.rflags & X86_EFLAGS_IF));
 }
 
 static void save_db_regs(unsigned long *db_regs)
@@ -1544,7 +1528,7 @@ again:
 		return r;
 
 	if (!vcpu->mmio_read_completed)
-		do_interrupt_requests(vcpu, kvm_run);
+		do_interrupt_requests(svm, kvm_run);
 
 	clgi();
 
@@ -1553,7 +1537,7 @@ again:
 		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
 		    svm_flush_tlb(vcpu);
 
-	pre_svm_run(vcpu);
+	pre_svm_run(svm);
 
 	save_host_msrs(vcpu);
 	fs_selector = read_fs();
@@ -1713,7 +1697,7 @@ again:
 
 	stgi();
 
-	kvm_reput_irq(vcpu);
+	reput_irq(svm);
 
 	svm->next_rip = 0;
 
@@ -1721,29 +1705,29 @@ again:
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		kvm_run->fail_entry.hardware_entry_failure_reason
 			= svm->vmcb->control.exit_code;
-		post_kvm_run_save(vcpu, kvm_run);
+		post_kvm_run_save(svm, kvm_run);
 		return 0;
 	}
 
-	r = handle_exit(vcpu, kvm_run);
+	r = handle_exit(svm, kvm_run);
 	if (r > 0) {
 		if (signal_pending(current)) {
 			++vcpu->stat.signal_exits;
-			post_kvm_run_save(vcpu, kvm_run);
+			post_kvm_run_save(svm, kvm_run);
 			kvm_run->exit_reason = KVM_EXIT_INTR;
 			return -EINTR;
 		}
 
-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+		if (dm_request_for_irq_injection(svm, kvm_run)) {
 			++vcpu->stat.request_irq_exits;
-			post_kvm_run_save(vcpu, kvm_run);
+			post_kvm_run_save(svm, kvm_run);
 			kvm_run->exit_reason = KVM_EXIT_INTR;
 			return -EINTR;
 		}
 		kvm_resched(vcpu);
 		goto again;
 	}
-	post_kvm_run_save(vcpu, kvm_run);
+	post_kvm_run_save(svm, kvm_run);
 	return r;
 }
 



-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >>  http://get.splunk.com/

  parent reply	other threads:[~2007-07-30  6:32 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-07-30  6:31 [PATCH 1/4] vmx: pass vcpu_vmx internally Rusty Russell
     [not found] ` <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  6:32   ` Rusty Russell [this message]
     [not found]     ` <1185777179.12151.149.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  6:36       ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
     [not found]         ` <1185777368.12151.152.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  6:39           ` [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops Rusty Russell
     [not found]             ` <1185777542.12151.156.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30  9:12               ` Avi Kivity
     [not found]                 ` <46ADAB67.5070805-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 10:04                   ` Rusty Russell
     [not found]                     ` <1185789882.6131.30.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 10:26                       ` Avi Kivity
     [not found]                         ` <46ADBCBC.9050207-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 11:12                           ` [PATCH 1/2] Use kmem cache for allocating vcpus Rusty Russell
     [not found]                             ` <1185793939.6131.38.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 11:13                               ` [PATCH 2/2] Use alignment properties of vcpu to simplify FPU ops Rusty Russell
     [not found]                                 ` <1185794023.6131.41.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 13:09                                   ` Avi Kivity
2007-07-30  6:39           ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
2007-07-30  9:03       ` [PATCH 2/4] svm: pass vcpu_svm internally Avi Kivity
     [not found]         ` <46ADA971.9030406-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 10:07           ` [PATCH 1/2] svm de-containization Rusty Russell
     [not found]             ` <1185790028.6131.32.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 10:08               ` [PATCH 2/2] svm internal function name cleanup Rusty Russell
2007-07-30 10:22               ` [PATCH 1/2] svm de-containization Avi Kivity
2007-07-30  9:02   ` [PATCH 1/4] vmx: pass vcpu_vmx internally Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1185777179.12151.149.camel@localhost.localdomain \
    --to=rusty-8n+1lvoiyb80n/f98k4iww@public.gmane.org \
    --cc=kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox