* [PATCH 1/4] vmx: pass vcpu_vmx internally
@ 2007-07-30 6:31 Rusty Russell
[not found] ` <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 6:31 UTC (permalink / raw)
To: kvm-devel
container_of is wonderful, but not casting at all is better. This
patch changes vmx.c's internal functions to pass "struct vcpu_vmx"
rather than taking "struct kvm_vcpu" and using container_of.
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r 45a921ab14a2 drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c Fri Jul 27 17:38:38 2007 +1000
+++ b/drivers/kvm/vmx.c Fri Jul 27 17:44:34 2007 +1000
@@ -141,9 +141,8 @@ static inline u64 msr_efer_save_restore_
return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
}
-static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
+{
int efer_offset = vmx->msr_offset_efer;
return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
@@ -169,9 +168,8 @@ static inline int is_external_interrupt(
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
-static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
+{
int i;
for (i = 0; i < vmx->nmsrs; ++i)
@@ -180,12 +178,11 @@ static int __find_msr_index(struct kvm_v
return -1;
}
-static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+{
int i;
- i = __find_msr_index(vcpu, msr);
+ i = __find_msr_index(vmx, msr);
if (i >= 0)
return &vmx->guest_msrs[i];
return NULL;
@@ -206,24 +203,24 @@ static void vmcs_clear(struct vmcs *vmcs
static void __vcpu_clear(void *arg)
{
- struct kvm_vcpu *vcpu = arg;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vcpu_vmx *vmx = arg;
int cpu = raw_smp_processor_id();
- if (vcpu->cpu == cpu)
+ if (vmx->vcpu.cpu == cpu)
vmcs_clear(vmx->vmcs);
if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- rdtscll(vcpu->host_tsc);
-}
-
-static void vcpu_clear(struct kvm_vcpu *vcpu)
-{
- if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
- smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
+ rdtscll(vmx->vcpu.host_tsc);
+}
+
+static void vcpu_clear(struct vcpu_vmx *vmx)
+{
+ if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
+ smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
+ vmx, 0, 1);
else
- __vcpu_clear(vcpu);
- to_vmx(vcpu)->launched = 0;
+ __vcpu_clear(vmx);
+ vmx->launched = 0;
}
static unsigned long vmcs_readl(unsigned long field)
@@ -333,23 +330,20 @@ static void reload_tss(void)
#endif
}
-static void load_transition_efer(struct kvm_vcpu *vcpu)
+static void load_transition_efer(struct vcpu_vmx *vmx)
{
u64 trans_efer;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
int efer_offset = vmx->msr_offset_efer;
trans_efer = vmx->host_msrs[efer_offset].data;
trans_efer &= ~EFER_SAVE_RESTORE_BITS;
trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
wrmsrl(MSR_EFER, trans_efer);
- vcpu->stat.efer_reload++;
-}
-
-static void vmx_save_host_state(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
+ vmx->vcpu.stat.efer_reload++;
+}
+
+static void vmx_save_host_state(struct vcpu_vmx *vmx)
+{
if (vmx->host_state.loaded)
return;
@@ -384,20 +378,18 @@ static void vmx_save_host_state(struct k
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
+ if (is_long_mode(&vmx->vcpu)) {
save_msrs(vmx->host_msrs +
vmx->msr_offset_kernel_gs_base, 1);
}
#endif
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vcpu))
- load_transition_efer(vcpu);
-}
-
-static void vmx_load_host_state(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
+ if (msr_efer_need_save_restore(vmx))
+ load_transition_efer(vmx);
+}
+
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
+{
if (!vmx->host_state.loaded)
return;
@@ -420,7 +412,7 @@ static void vmx_load_host_state(struct k
}
save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
load_msrs(vmx->host_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vcpu))
+ if (msr_efer_need_save_restore(vmx))
load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
}
@@ -438,7 +430,7 @@ static void vmx_vcpu_load(struct kvm_vcp
cpu = get_cpu();
if (vcpu->cpu != cpu)
- vcpu_clear(vcpu);
+ vcpu_clear(vmx);
if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
u8 error;
@@ -479,7 +471,7 @@ static void vmx_vcpu_load(struct kvm_vcp
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
- vmx_load_host_state(vcpu);
+ vmx_load_host_state(to_vmx(vcpu));
kvm_put_guest_fpu(vcpu);
put_cpu();
}
@@ -506,7 +498,7 @@ static void vmx_fpu_deactivate(struct kv
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
- vcpu_clear(vcpu);
+ vcpu_clear(to_vmx(vcpu));
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -554,9 +546,8 @@ static void vmx_inject_gp(struct kvm_vcp
/*
* Swap MSR entry in host/guest MSR entry array.
*/
-void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+{
struct kvm_msr_entry tmp;
tmp = vmx->guest_msrs[to];
@@ -572,44 +563,43 @@ void move_msr_up(struct kvm_vcpu *vcpu,
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
* mode, as fiddling with msrs is very expensive.
*/
-static void setup_msrs(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+static void setup_msrs(struct vcpu_vmx *vmx)
+{
int save_nmsrs;
save_nmsrs = 0;
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
+ if (is_long_mode(&vmx->vcpu)) {
int index;
- index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+ index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
- index = __find_msr_index(vcpu, MSR_LSTAR);
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_LSTAR);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
- index = __find_msr_index(vcpu, MSR_CSTAR);
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_CSTAR);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
- index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
+ move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_K6_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
*/
- index = __find_msr_index(vcpu, MSR_K6_STAR);
- if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
- move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_K6_STAR);
+ if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+ move_msr_up(vmx, index, save_nmsrs++);
}
#endif
vmx->save_nmsrs = save_nmsrs;
#ifdef CONFIG_X86_64
vmx->msr_offset_kernel_gs_base =
- __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+ __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
#endif
- vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+ vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}
/*
@@ -676,7 +666,7 @@ static int vmx_get_msr(struct kvm_vcpu *
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
default:
- msr = find_msr_entry(vcpu, msr_index);
+ msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
data = msr->data;
break;
@@ -704,7 +694,7 @@ static int vmx_set_msr(struct kvm_vcpu *
case MSR_EFER:
ret = kvm_set_msr_common(vcpu, msr_index, data);
if (vmx->host_state.loaded)
- load_transition_efer(vcpu);
+ load_transition_efer(vmx);
break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
@@ -726,7 +716,7 @@ static int vmx_set_msr(struct kvm_vcpu *
guest_write_tsc(data);
break;
default:
- msr = find_msr_entry(vcpu, msr_index);
+ msr = find_msr_entry(vmx, msr_index);
if (msr) {
msr->data = data;
if (vmx->host_state.loaded)
@@ -1038,7 +1028,7 @@ static void enter_lmode(struct kvm_vcpu
vcpu->shadow_efer |= EFER_LMA;
- find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
+ find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
| VM_ENTRY_CONTROLS_IA32E_MASK);
@@ -1108,7 +1098,8 @@ static void vmx_set_cr4(struct kvm_vcpu
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
vcpu->shadow_efer = efer;
if (efer & EFER_LMA) {
@@ -1124,7 +1115,7 @@ static void vmx_set_efer(struct kvm_vcpu
msr->data = efer & ~EFER_LME;
}
- setup_msrs(vcpu);
+ setup_msrs(vmx);
}
#endif
@@ -1297,9 +1288,9 @@ static void seg_setup(int seg)
/*
* Sets up the vmcs for emulated real mode.
*/
-static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+{
+
u32 host_sysenter_cs;
u32 junk;
unsigned long a;
@@ -1308,19 +1299,18 @@ static int vmx_vcpu_setup(struct kvm_vcp
int ret = 0;
unsigned long kvm_vmx_return;
- if (!init_rmode_tss(vcpu->kvm)) {
+ if (!init_rmode_tss(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
}
- memset(vcpu->regs, 0, sizeof(vcpu->regs));
- vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
- vcpu->cr8 = 0;
- vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
- if (vcpu->vcpu_id == 0)
- vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
-
- fx_init(vcpu);
+ vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+ vmx->vcpu.cr8 = 0;
+ vmx->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vmx->vcpu.vcpu_id == 0)
+ vmx->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+
+ fx_init(&vmx->vcpu);
/*
* GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
@@ -1455,7 +1445,7 @@ static int vmx_vcpu_setup(struct kvm_vcp
++vmx->nmsrs;
}
- setup_msrs(vcpu);
+ setup_msrs(vmx);
vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
(HOST_IS_64 << 9)); /* 22.2,1, 20.7.1 */
@@ -1473,14 +1463,14 @@ static int vmx_vcpu_setup(struct kvm_vcp
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
- vcpu->cr0 = 0x60000010;
- vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
- vmx_set_cr4(vcpu, 0);
+ vmx->vcpu.cr0 = 0x60000010;
+ vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+ vmx_set_cr4(&vmx->vcpu, 0);
#ifdef CONFIG_X86_64
- vmx_set_efer(vcpu, 0);
+ vmx_set_efer(&vmx->vcpu, 0);
#endif
- vmx_fpu_activate(vcpu);
- update_exception_bitmap(vcpu);
+ vmx_fpu_activate(&vmx->vcpu);
+ update_exception_bitmap(&vmx->vcpu);
return 0;
@@ -2071,7 +2061,7 @@ again:
if (!vcpu->mmio_read_completed)
do_interrupt_requests(vcpu, kvm_run);
- vmx_save_host_state(vcpu);
+ vmx_save_host_state(vmx);
kvm_load_guest_fpu(vcpu);
/*
@@ -2292,7 +2282,7 @@ static void vmx_free_vmcs(struct kvm_vcp
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vmx->vmcs) {
- on_each_cpu(__vcpu_clear, vcpu, 0, 1);
+ on_each_cpu(__vcpu_clear, vmx, 0, 1);
free_vmcs(vmx->vmcs);
vmx->vmcs = NULL;
}
@@ -2338,7 +2328,7 @@ static struct kvm_vcpu *vmx_create_vcpu(
vmcs_clear(vmx->vmcs);
vmx_vcpu_load(&vmx->vcpu);
- err = vmx_vcpu_setup(&vmx->vcpu);
+ err = vmx_vcpu_setup(vmx);
vmx_vcpu_put(&vmx->vcpu);
if (err)
goto free_vmcs;
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 2/4] svm: pass vcpu_svm internally
[not found] ` <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 6:32 ` Rusty Russell
[not found] ` <1185777179.12151.149.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 9:02 ` [PATCH 1/4] vmx: pass vcpu_vmx internally Avi Kivity
1 sibling, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 6:32 UTC (permalink / raw)
To: kvm-devel
container_of is wonderful, but not casting at all is better. This
patch changes svm.c's internal functions to pass "struct vcpu_svm"
rather than taking "struct kvm_vcpu" and using container_of.
It also changes some internal function names:
1) io_adress -> io_address (de-germanify the spelling)
2) kvm_reput_irq -> reput_irq (it's not a generic kvm function)
3) kvm_do_inject_irq -> inject_irq (it's not a generic kvm function)
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r 107739271dfc drivers/kvm/svm.c
--- a/drivers/kvm/svm.c Fri Jul 27 17:44:47 2007 +1000
+++ b/drivers/kvm/svm.c Fri Jul 27 18:06:44 2007 +1000
@@ -98,9 +98,9 @@ static inline u32 svm_has(u32 feat)
return svm_features & feat;
}
-static unsigned get_addr_size(struct kvm_vcpu *vcpu)
-{
- struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
+static unsigned get_addr_size(struct vcpu_svm *svm)
+{
+ struct vmcb_save_area *sa = &svm->vmcb->save;
u16 cs_attrib;
if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
@@ -866,17 +866,15 @@ static void save_host_msrs(struct kvm_vc
#endif
}
-static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+{
if (svm_data->next_asid > svm_data->max_asid) {
++svm_data->asid_generation;
svm_data->next_asid = 1;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
- vcpu->cpu = svm_data->cpu;
+ svm->vcpu.cpu = svm_data->cpu;
svm->asid_generation = svm_data->asid_generation;
svm->vmcb->control.asid = svm_data->next_asid++;
}
@@ -930,42 +928,43 @@ static void svm_set_dr(struct kvm_vcpu *
}
}
-static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
u32 exit_int_info = svm->vmcb->control.exit_int_info;
+ struct kvm *kvm = svm->vcpu.kvm;
u64 fault_address;
u32 error_code;
enum emulation_result er;
int r;
if (is_external_interrupt(exit_int_info))
- push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
-
- spin_lock(&vcpu->kvm->lock);
+ push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+
+ spin_lock(&kvm->lock);
fault_address = svm->vmcb->control.exit_info_2;
error_code = svm->vmcb->control.exit_info_1;
- r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+ r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
if (r < 0) {
- spin_unlock(&vcpu->kvm->lock);
+ spin_unlock(&kvm->lock);
return r;
}
if (!r) {
- spin_unlock(&vcpu->kvm->lock);
+ spin_unlock(&kvm->lock);
return 1;
}
- er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
- spin_unlock(&vcpu->kvm->lock);
+ er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
+ error_code);
+ spin_unlock(&kvm->lock);
switch (er) {
case EMULATE_DONE:
return 1;
case EMULATE_DO_MMIO:
- ++vcpu->stat.mmio_exits;
+ ++svm->vcpu.stat.mmio_exits;
return 0;
case EMULATE_FAIL:
- vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+ vcpu_printf(&svm->vcpu, "%s: emulate fail\n", __FUNCTION__);
break;
default:
BUG();
@@ -975,21 +974,18 @@ static int pf_interception(struct kvm_vc
return 0;
}
-static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
- if (!(vcpu->cr0 & X86_CR0_TS))
+ if (!(svm->vcpu.cr0 & X86_CR0_TS))
svm->vmcb->save.cr0 &= ~X86_CR0_TS;
- vcpu->fpu_active = 1;
+ svm->vcpu.fpu_active = 1;
return 1;
}
-static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
/*
* VMCB is undefined after a SHUTDOWN intercept
* so reinitialize it.
@@ -1001,11 +997,10 @@ static int shutdown_interception(struct
return 0;
}
-static int io_get_override(struct kvm_vcpu *vcpu,
+static int io_get_override(struct vcpu_svm *svm,
struct vmcb_seg **seg,
int *addr_override)
{
- struct vcpu_svm *svm = to_svm(vcpu);
u8 inst[MAX_INST_SIZE];
unsigned ins_length;
gva_t rip;
@@ -1024,7 +1019,7 @@ static int io_get_override(struct kvm_vc
svm->vmcb->control.exit_info_2,
ins_length);
- if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
+ if (kvm_read_guest(&svm->vcpu, rip, ins_length, inst) != ins_length)
/* #PF */
return 0;
@@ -1065,28 +1060,27 @@ static int io_get_override(struct kvm_vc
return 0;
}
-static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
+static unsigned long io_address(struct vcpu_svm *svm, int ins, gva_t *address)
{
unsigned long addr_mask;
unsigned long *reg;
struct vmcb_seg *seg;
int addr_override;
- struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_save_area *save_area = &svm->vmcb->save;
u16 cs_attrib = save_area->cs.attrib;
- unsigned addr_size = get_addr_size(vcpu);
-
- if (!io_get_override(vcpu, &seg, &addr_override))
+ unsigned addr_size = get_addr_size(svm);
+
+ if (!io_get_override(svm, &seg, &addr_override))
return 0;
if (addr_override)
addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
if (ins) {
- reg = &vcpu->regs[VCPU_REGS_RDI];
+ reg = &svm->vcpu.regs[VCPU_REGS_RDI];
seg = &svm->vmcb->save.es;
} else {
- reg = &vcpu->regs[VCPU_REGS_RSI];
+ reg = &svm->vcpu.regs[VCPU_REGS_RSI];
seg = (seg) ? seg : &svm->vmcb->save.ds;
}
@@ -1099,7 +1093,7 @@ static unsigned long io_adress(struct kv
}
if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
- svm_inject_gp(vcpu, 0);
+ svm_inject_gp(&svm->vcpu, 0);
return 0;
}
@@ -1107,16 +1101,15 @@ static unsigned long io_adress(struct kv
return addr_mask;
}
-static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
int size, down, in, string, rep;
unsigned port;
unsigned long count;
gva_t address = 0;
- ++vcpu->stat.io_exits;
+ ++svm->vcpu.stat.io_exits;
svm->next_rip = svm->vmcb->control.exit_info_2;
@@ -1131,69 +1124,66 @@ static int io_interception(struct kvm_vc
if (string) {
unsigned addr_mask;
- addr_mask = io_adress(vcpu, in, &address);
+ addr_mask = io_address(svm, in, &address);
if (!addr_mask) {
printk(KERN_DEBUG "%s: get io address failed\n",
__FUNCTION__);
return 1;
}
if (rep)
- count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
- }
- return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
- address, rep, port);
-}
-
-static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ count = svm->vcpu.regs[VCPU_REGS_RCX] & addr_mask;
+ }
+ return kvm_setup_pio(&svm->vcpu, kvm_run, in, size, count, string,
+ down, address, rep, port);
+}
+
+static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
return 1;
}
-static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->next_rip = svm->vmcb->save.rip + 1;
- skip_emulated_instruction(vcpu);
- return kvm_emulate_halt(vcpu);
-}
-
-static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+ skip_emulated_instruction(&svm->vcpu);
+ return kvm_emulate_halt(&svm->vcpu);
+}
+
+static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->next_rip = svm->vmcb->save.rip + 3;
- skip_emulated_instruction(vcpu);
- return kvm_hypercall(vcpu, kvm_run);
-}
-
-static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- inject_ud(vcpu);
+ skip_emulated_instruction(&svm->vcpu);
+ return kvm_hypercall(&svm->vcpu, kvm_run);
+}
+
+static int invalid_op_interception(struct vcpu_svm *svm,
+ struct kvm_run *kvm_run)
+{
+ inject_ud(&svm->vcpu);
return 1;
}
-static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
+static int task_switch_interception(struct vcpu_svm *svm,
+ struct kvm_run *kvm_run)
+{
+ printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
return 0;
}
-static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->next_rip = svm->vmcb->save.rip + 2;
- kvm_emulate_cpuid(vcpu);
+ kvm_emulate_cpuid(&svm->vcpu);
return 1;
}
-static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
- printk(KERN_ERR "%s: failed\n", __FUNCTION__);
+static int emulate_on_interception(struct vcpu_svm *svm,
+ struct kvm_run *kvm_run)
+{
+ if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
+ printk(KERN_ERR "%s: failed\n", __FUNCTION__);
return 1;
}
@@ -1240,19 +1230,18 @@ static int svm_get_msr(struct kvm_vcpu *
return 0;
}
-static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
u64 data;
- if (svm_get_msr(vcpu, ecx, &data))
- svm_inject_gp(vcpu, 0);
+ if (svm_get_msr(&svm->vcpu, ecx, &data))
+ svm_inject_gp(&svm->vcpu, 0);
else {
svm->vmcb->save.rax = data & 0xffffffff;
- vcpu->regs[VCPU_REGS_RDX] = data >> 32;
+ svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
svm->next_rip = svm->vmcb->save.rip + 2;
- skip_emulated_instruction(vcpu);
+ skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
@@ -1301,29 +1290,28 @@ static int svm_set_msr(struct kvm_vcpu *
return 0;
}
-static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
u64 data = (svm->vmcb->save.rax & -1u)
- | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+ | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
svm->next_rip = svm->vmcb->save.rip + 2;
- if (svm_set_msr(vcpu, ecx, data))
- svm_inject_gp(vcpu, 0);
+ if (svm_set_msr(&svm->vcpu, ecx, data))
+ svm_inject_gp(&svm->vcpu, 0);
else
- skip_emulated_instruction(vcpu);
+ skip_emulated_instruction(&svm->vcpu);
return 1;
}
-static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- if (to_svm(vcpu)->vmcb->control.exit_info_1)
- return wrmsr_interception(vcpu, kvm_run);
+static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ if (svm->vmcb->control.exit_info_1)
+ return wrmsr_interception(svm, kvm_run);
else
- return rdmsr_interception(vcpu, kvm_run);
-}
-
-static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+ return rdmsr_interception(svm, kvm_run);
+}
+
+static int interrupt_window_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
/*
@@ -1331,8 +1319,8 @@ static int interrupt_window_interception
* possible
*/
if (kvm_run->request_interrupt_window &&
- !vcpu->irq_summary) {
- ++vcpu->stat.irq_window_exits;
+ !svm->vcpu.irq_summary) {
+ ++svm->vcpu.stat.irq_window_exits;
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
}
@@ -1340,7 +1328,7 @@ static int interrupt_window_interception
return 1;
}
-static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
+static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
struct kvm_run *kvm_run) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
[SVM_EXIT_READ_CR3] = emulate_on_interception,
@@ -1387,9 +1375,8 @@ static int (*svm_exit_handlers[])(struct
};
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
u32 exit_code = svm->vmcb->control.exit_code;
if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
@@ -1406,7 +1393,7 @@ static int handle_exit(struct kvm_vcpu *
return 0;
}
- return svm_exit_handlers[exit_code](vcpu, kvm_run);
+ return svm_exit_handlers[exit_code](svm, kvm_run);
}
static void reload_tss(struct kvm_vcpu *vcpu)
@@ -1418,80 +1405,77 @@ static void reload_tss(struct kvm_vcpu *
load_TR_desc();
}
-static void pre_svm_run(struct kvm_vcpu *vcpu)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static void pre_svm_run(struct vcpu_svm *svm)
+{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
- if (vcpu->cpu != cpu ||
+ if (svm->vcpu.cpu != cpu ||
svm->asid_generation != svm_data->asid_generation)
- new_asid(vcpu, svm_data);
-}
-
-
-static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
+ new_asid(svm, svm_data);
+}
+
+
+static inline void inject_irq(struct vcpu_svm *svm)
{
struct vmcb_control_area *control;
- control = &to_svm(vcpu)->vmcb->control;
- control->int_vector = pop_irq(vcpu);
+ control = &svm->vmcb->control;
+ control->int_vector = pop_irq(&svm->vcpu);
control->int_ctl &= ~V_INTR_PRIO_MASK;
control->int_ctl |= V_IRQ_MASK |
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
-static void kvm_reput_irq(struct kvm_vcpu *vcpu)
-{
- struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+static void reput_irq(struct vcpu_svm *svm)
+{
+ struct vmcb_control_area *control = &svm->vmcb->control;
if (control->int_ctl & V_IRQ_MASK) {
control->int_ctl &= ~V_IRQ_MASK;
- push_irq(vcpu, control->int_vector);
- }
-
- vcpu->interrupt_window_open =
+ push_irq(&svm->vcpu, control->int_vector);
+ }
+
+ svm->vcpu.interrupt_window_open =
!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+static void do_interrupt_requests(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
- vcpu->interrupt_window_open =
+ svm->vcpu.interrupt_window_open =
(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
(svm->vmcb->save.rflags & X86_EFLAGS_IF));
- if (vcpu->interrupt_window_open && vcpu->irq_summary)
+ if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
/*
* If interrupts enabled, and not blocked by sti or mov ss. Good.
*/
- kvm_do_inject_irq(vcpu);
+ inject_irq(svm);
/*
* Interrupts blocked. Wait for unblock.
*/
- if (!vcpu->interrupt_window_open &&
- (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+ if (!svm->vcpu.interrupt_window_open &&
+ (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
control->intercept |= 1ULL << INTERCEPT_VINTR;
} else
control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+static void post_kvm_run_save(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
- vcpu->irq_summary == 0);
+ kvm_run->ready_for_interrupt_injection
+ = (svm->vcpu.interrupt_window_open &&
+ svm->vcpu.irq_summary == 0);
kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
- kvm_run->cr8 = vcpu->cr8;
- kvm_run->apic_base = vcpu->apic_base;
+ kvm_run->cr8 = svm->vcpu.cr8;
+ kvm_run->apic_base = svm->vcpu.apic_base;
}
/*
@@ -1500,13 +1484,13 @@ static void post_kvm_run_save(struct kvm
*
* No need to exit to userspace if we already have an interrupt queued.
*/
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+static int dm_request_for_irq_injection(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- return (!vcpu->irq_summary &&
+ return (!svm->vcpu.irq_summary &&
kvm_run->request_interrupt_window &&
- vcpu->interrupt_window_open &&
- (to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
+ svm->vcpu.interrupt_window_open &&
+ (svm->vmcb->save.rflags & X86_EFLAGS_IF));
}
static void save_db_regs(unsigned long *db_regs)
@@ -1544,7 +1528,7 @@ again:
return r;
if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
+ do_interrupt_requests(svm, kvm_run);
clgi();
@@ -1553,7 +1537,7 @@ again:
if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
svm_flush_tlb(vcpu);
- pre_svm_run(vcpu);
+ pre_svm_run(svm);
save_host_msrs(vcpu);
fs_selector = read_fs();
@@ -1713,7 +1697,7 @@ again:
stgi();
- kvm_reput_irq(vcpu);
+ reput_irq(svm);
svm->next_rip = 0;
@@ -1721,29 +1705,29 @@ again:
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= svm->vmcb->control.exit_code;
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
return 0;
}
- r = handle_exit(vcpu, kvm_run);
+ r = handle_exit(svm, kvm_run);
if (r > 0) {
if (signal_pending(current)) {
++vcpu->stat.signal_exits;
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
kvm_run->exit_reason = KVM_EXIT_INTR;
return -EINTR;
}
- if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+ if (dm_request_for_irq_injection(svm, kvm_run)) {
++vcpu->stat.request_irq_exits;
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
kvm_run->exit_reason = KVM_EXIT_INTR;
return -EINTR;
}
kvm_resched(vcpu);
goto again;
}
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
return r;
}
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header
[not found] ` <1185777179.12151.149.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 6:36 ` Rusty Russell
[not found] ` <1185777368.12151.152.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 9:03 ` [PATCH 2/4] svm: pass vcpu_svm internally Avi Kivity
1 sibling, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 6:36 UTC (permalink / raw)
To: kvm-devel
Everyone can use push_irq() and pop_irq(), so move them to common code.
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r a26d2ddb1eee drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h Fri Jul 27 18:08:57 2007 +1000
+++ b/drivers/kvm/kvm.h Fri Jul 27 18:09:38 2007 +1000
@@ -725,6 +725,24 @@ static inline u32 get_rdx_init_val(void)
return 0x600; /* P6 family */
}
+static inline u8 pop_irq(struct kvm_vcpu *vcpu)
+{
+ int word_index = __ffs(vcpu->irq_summary);
+ int bit_index = __ffs(vcpu->irq_pending[word_index]);
+ int irq = word_index * BITS_PER_LONG + bit_index;
+
+ clear_bit(bit_index, &vcpu->irq_pending[word_index]);
+ if (!vcpu->irq_pending[word_index])
+ clear_bit(word_index, &vcpu->irq_summary);
+ return irq;
+}
+
+static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
+{
+ set_bit(irq, vcpu->irq_pending);
+ set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
+}
+
#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
diff -r a26d2ddb1eee drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c Fri Jul 27 18:08:57 2007 +1000
+++ b/drivers/kvm/kvm_main.c Fri Jul 27 18:12:19 2007 +1000
@@ -2299,10 +2299,7 @@ static int kvm_vcpu_ioctl_interrupt(stru
if (irq->irq < 0 || irq->irq >= 256)
return -EINVAL;
vcpu_load(vcpu);
-
- set_bit(irq->irq, vcpu->irq_pending);
- set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
-
+ push_irq(vcpu, irq->irq);
vcpu_put(vcpu);
return 0;
diff -r a26d2ddb1eee drivers/kvm/svm.c
--- a/drivers/kvm/svm.c Fri Jul 27 18:08:57 2007 +1000
+++ b/drivers/kvm/svm.c Fri Jul 27 18:09:03 2007 +1000
@@ -110,24 +110,6 @@ static unsigned get_addr_size(struct vcp
return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
-}
-
-static inline u8 pop_irq(struct kvm_vcpu *vcpu)
-{
- int word_index = __ffs(vcpu->irq_summary);
- int bit_index = __ffs(vcpu->irq_pending[word_index]);
- int irq = word_index * BITS_PER_LONG + bit_index;
-
- clear_bit(bit_index, &vcpu->irq_pending[word_index]);
- if (!vcpu->irq_pending[word_index])
- clear_bit(word_index, &vcpu->irq_summary);
- return irq;
-}
-
-static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
-{
- set_bit(irq, vcpu->irq_pending);
- set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
static inline void clgi(void)
diff -r a26d2ddb1eee drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c Fri Jul 27 18:08:57 2007 +1000
+++ b/drivers/kvm/vmx.c Fri Jul 27 18:10:43 2007 +1000
@@ -1525,13 +1525,7 @@ static void inject_rmode_irq(struct kvm_
static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
- int word_index = __ffs(vcpu->irq_summary);
- int bit_index = __ffs(vcpu->irq_pending[word_index]);
- int irq = word_index * BITS_PER_LONG + bit_index;
-
- clear_bit(bit_index, &vcpu->irq_pending[word_index]);
- if (!vcpu->irq_pending[word_index])
- clear_bit(word_index, &vcpu->irq_summary);
+ int irq = pop_irq(vcpu);
if (vcpu->rmode.active) {
inject_rmode_irq(vcpu, irq);
@@ -1622,11 +1616,8 @@ static int handle_exception(struct kvm_v
"intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
}
- if (is_external_interrupt(vect_info)) {
- int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
- set_bit(irq, vcpu->irq_pending);
- set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
- }
+ if (is_external_interrupt(vect_info))
+ push_irq(vcpu, vect_info & VECTORING_INFO_VECTOR_MASK);
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
asm ("int $2");
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops
[not found] ` <1185777368.12151.152.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 6:39 ` Rusty Russell
[not found] ` <1185777542.12151.156.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 6:39 ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
1 sibling, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 6:39 UTC (permalink / raw)
To: kvm-devel
If we use a kmem cache for allocating vcpus, we can get the 16-byte
alignment required by fxsave & fxrstor instructions, and avoid
manually aligning the buffer.
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r 39421cbc0486 drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h Mon Jul 30 15:01:44 2007 +1000
+++ b/drivers/kvm/kvm.h Mon Jul 30 15:09:54 2007 +1000
@@ -43,10 +43,6 @@
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
-
-#define FX_IMAGE_SIZE 512
-#define FX_IMAGE_ALIGN 16
-#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
#define DE_VECTOR 0
#define NM_VECTOR 7
@@ -339,9 +335,8 @@ struct kvm_vcpu {
struct kvm_guest_debug guest_debug;
- char fx_buf[FX_BUF_SIZE];
- char *host_fx_image;
- char *guest_fx_image;
+ struct i387_fxsave_struct host_fx_image;
+ struct i387_fxsave_struct guest_fx_image;
int fpu_active;
int guest_fpu_loaded;
@@ -697,12 +692,12 @@ static inline unsigned long read_msr(uns
}
#endif
-static inline void fx_save(void *image)
+static inline void fx_save(struct i387_fxsave_struct *image)
{
asm ("fxsave (%0)":: "r" (image));
}
-static inline void fx_restore(void *image)
+static inline void fx_restore(struct i387_fxsave_struct *image)
{
asm ("fxrstor (%0)":: "r" (image));
}
diff -r 39421cbc0486 drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c Mon Jul 30 15:01:44 2007 +1000
+++ b/drivers/kvm/kvm_main.c Mon Jul 30 15:10:09 2007 +1000
@@ -218,8 +218,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu
return;
vcpu->guest_fpu_loaded = 1;
- fx_save(vcpu->host_fx_image);
- fx_restore(vcpu->guest_fx_image);
+ fx_save(&vcpu->host_fx_image);
+ fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
@@ -229,8 +229,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *
return;
vcpu->guest_fpu_loaded = 0;
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
@@ -317,10 +317,6 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
goto fail_free_run;
}
vcpu->pio_data = page_address(page);
-
- vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
- FX_IMAGE_ALIGN);
- vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
r = kvm_mmu_create(vcpu);
if (r < 0)
@@ -672,30 +668,20 @@ EXPORT_SYMBOL_GPL(set_cr8);
void fx_init(struct kvm_vcpu *vcpu)
{
- struct __attribute__ ((__packed__)) fx_image_s {
- u16 control; //fcw
- u16 status; //fsw
- u16 tag; // ftw
- u16 opcode; //fop
- u64 ip; // fpu ip
- u64 operand;// fpu dp
- u32 mxcsr;
- u32 mxcsr_mask;
-
- } *fx_image;
+ unsigned after_mxcsr_mask;
/* Initialize guest FPU by resetting ours and saving into guest's */
preempt_disable();
- fx_save(vcpu->host_fx_image);
+ fx_save(&vcpu->host_fx_image);
fpu_init();
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
preempt_enable();
- fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
- fx_image->mxcsr = 0x1f80;
- memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
- 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
+ after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+ vcpu->guest_fx_image.mxcsr = 0x1f80;
+ memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+ 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
@@ -2414,6 +2400,9 @@ static int kvm_vm_ioctl_create_vcpu(stru
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
+ /* We do fxsave: this must be aligned. */
+ BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
vcpu_load(vcpu);
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
@@ -2526,7 +2515,7 @@ struct fxsave {
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+ struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
vcpu_load(vcpu);
@@ -2546,7 +2535,7 @@ static int kvm_vcpu_ioctl_get_fpu(struct
static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+ struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
vcpu_load(vcpu);
diff -r 39421cbc0486 drivers/kvm/svm.c
--- a/drivers/kvm/svm.c Mon Jul 30 15:01:44 2007 +1000
+++ b/drivers/kvm/svm.c Mon Jul 30 15:09:54 2007 +1000
@@ -79,6 +79,7 @@ struct svm_cpu_data {
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;
+static struct kmem_cache *svm_kmem_cache;
struct svm_init_data {
int cpu;
@@ -559,7 +560,7 @@ static struct kvm_vcpu *svm_create_vcpu(
struct page *page;
int err;
- svm = kzalloc(sizeof *svm, GFP_KERNEL);
+ svm = kmem_cache_zalloc(svm_kmem_cache, GFP_KERNEL);
if (!svm) {
err = -ENOMEM;
goto out;
@@ -1540,8 +1541,8 @@ again:
}
if (vcpu->fpu_active) {
- fx_save(vcpu->host_fx_image);
- fx_restore(vcpu->guest_fx_image);
+ fx_save(&vcpu->host_fx_image);
+ fx_restore(&vcpu->guest_fx_image);
}
asm volatile (
@@ -1653,8 +1654,8 @@ again:
vcpu->guest_mode = 0;
if (vcpu->fpu_active) {
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
}
if ((svm->vmcb->save.dr7 & 0xff))
@@ -1832,12 +1833,23 @@ static struct kvm_arch_ops svm_arch_ops
static int __init svm_init(void)
{
- return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
+ int err;
+
+ /* A kmem cache lets us meet the alignment requirements of fx_save. */
+ svm_kmem_cache = KMEM_CACHE(vcpu_svm, 0);
+ if (!svm_kmem_cache)
+ return -ENOMEM;
+
+ err = kvm_init_arch(&svm_arch_ops, THIS_MODULE);
+ if (err)
+ kmem_cache_destroy(svm_kmem_cache);
+ return err;
}
static void __exit svm_exit(void)
{
kvm_exit_arch();
+ kmem_cache_destroy(svm_kmem_cache);
}
module_init(svm_init)
diff -r 39421cbc0486 drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c Mon Jul 30 15:01:44 2007 +1000
+++ b/drivers/kvm/vmx.c Mon Jul 30 15:09:54 2007 +1000
@@ -70,6 +70,7 @@ static DEFINE_PER_CPU(struct vmcs *, cur
static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;
+static struct kmem_cache *vmx_kmem_cache;
#ifdef CONFIG_X86_64
#define HOST_IS_64 1
@@ -2295,7 +2296,7 @@ static struct kvm_vcpu *vmx_create_vcpu(
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
- struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
+ struct vcpu_vmx *vmx = kmem_cache_zalloc(vmx_kmem_cache, GFP_KERNEL);
if (!vmx)
return ERR_PTR(-ENOMEM);
@@ -2404,6 +2405,13 @@ static int __init vmx_init(void)
goto out;
}
+ /* A kmem cache lets us meet the alignment requirements of fx_save. */
+ vmx_kmem_cache = KMEM_CACHE(vcpu_vmx, 0);
+ if (!vmx_kmem_cache) {
+ r = -ENOMEM;
+ goto out1;
+ }
+
/*
* Allow direct access to the PC debug port (it is often used for I/O
* delays, but the vmexits simply slow things down).
@@ -2419,10 +2427,12 @@ static int __init vmx_init(void)
r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
if (r)
- goto out1;
+ goto out2;
return 0;
+out2:
+ kmem_cache_destroy(vmx_kmem_cache);
out1:
__free_page(vmx_io_bitmap_b);
out:
@@ -2432,6 +2442,7 @@ out:
static void __exit vmx_exit(void)
{
+ kmem_cache_destroy(vmx_kmem_cache);
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header
[not found] ` <1185777368.12151.152.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 6:39 ` [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops Rusty Russell
@ 2007-07-30 6:39 ` Rusty Russell
1 sibling, 0 replies; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 6:39 UTC (permalink / raw)
To: kvm-devel
s/House/Hoist/
More coffee needed maybe,
Rusty.
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 1/4] vmx: pass vcpu_vmx internally
[not found] ` <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 6:32 ` [PATCH 2/4] svm: pass vcpu_svm internally Rusty Russell
@ 2007-07-30 9:02 ` Avi Kivity
1 sibling, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2007-07-30 9:02 UTC (permalink / raw)
To: Rusty Russell; +Cc: kvm-devel
Rusty Russell wrote:
> container_of is wonderful, but not casting at all is better. This
> patch changes vmx.c's internal functions to pass "struct vcpu_vmx"
> instead of "struct kvm_vcpu" and using container_of.
>
> @@ -1297,9 +1288,9 @@ static void seg_setup(int seg)
> /*
> * Sets up the vmcs for emulated real mode.
> */
> -static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
> -{
> - struct vcpu_vmx *vmx = to_vmx(vcpu);
> +static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
> +{
> +
> u32 host_sysenter_cs;
> u32 junk;
> unsigned long a;
>
I fixed this up manually and applied. Please watch for these in the future.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 2/4] svm: pass vcpu_svm internally
[not found] ` <1185777179.12151.149.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 6:36 ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
@ 2007-07-30 9:03 ` Avi Kivity
[not found] ` <46ADA971.9030406-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
1 sibling, 1 reply; 16+ messages in thread
From: Avi Kivity @ 2007-07-30 9:03 UTC (permalink / raw)
To: Rusty Russell; +Cc: kvm-devel
Rusty Russell wrote:
> container_of is wonderful, but not casting at all is better. This
> patch changes svm.c's internal functions to pass "struct vcpu_svm"
> instead of "struct kvm_vcpu" and using container_of.
>
> It also changes some internal function names:
> 1) io_adress -> io_address (de-germanify the spelling)
> 2) kvm_reput_irq -> reput_irq (it's not a generic kvm function)
> 3) kvm_do_inject_irq -> (it's not a generic kvm function)
>
Please separate the renames from the de-container_of-fication.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops
[not found] ` <1185777542.12151.156.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 9:12 ` Avi Kivity
[not found] ` <46ADAB67.5070805-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
0 siblings, 1 reply; 16+ messages in thread
From: Avi Kivity @ 2007-07-30 9:12 UTC (permalink / raw)
To: Rusty Russell; +Cc: kvm-devel
Rusty Russell wrote:
> If we use a kmem cache for allocating vcpus, we can get the 16-byte
> alignment required by fxsave & fxrstor instructions, and avoid
> manually aligning the buffer.
>
>
Please separate the slabification from the fpu cleanups into separate
patches.
> Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
>
> diff -r 39421cbc0486 drivers/kvm/kvm.h
> --- a/drivers/kvm/kvm.h Mon Jul 30 15:01:44 2007 +1000
> +++ b/drivers/kvm/kvm.h Mon Jul 30 15:09:54 2007 +1000
> @@ -43,10 +43,6 @@
> #define KVM_MIN_FREE_MMU_PAGES 5
> #define KVM_REFILL_PAGES 25
> #define KVM_MAX_CPUID_ENTRIES 40
> -
> -#define FX_IMAGE_SIZE 512
> -#define FX_IMAGE_ALIGN 16
> -#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
>
> #define DE_VECTOR 0
> #define NM_VECTOR 7
> @@ -339,9 +335,8 @@ struct kvm_vcpu {
>
> struct kvm_guest_debug guest_debug;
>
> - char fx_buf[FX_BUF_SIZE];
> - char *host_fx_image;
> - char *guest_fx_image;
> + struct i387_fxsave_struct host_fx_image;
> + struct i387_fxsave_struct guest_fx_image;
>
So gcc is smart enough to propagate the alignment of i387_fxsave_struct
to its container? Nice.
>
> static int __init svm_init(void)
> {
> - return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
> + int err;
> +
> + /* A kmem cache lets us meet the alignment requirements of fx_save. */
> + svm_kmem_cache = KMEM_CACHE(vcpu_svm, 0);
>
kvm slab caches are prefixed with kvm_; this makes it easy to 'grep kvm
/proc/slabinfo' to see kvm leaks. Please preserve that.
Also, the allocation and destruction can be in common code, reducing the
amount of rarely-executed error paths.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops
[not found] ` <46ADAB67.5070805-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-07-30 10:04 ` Rusty Russell
[not found] ` <1185789882.6131.30.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 10:04 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Mon, 2007-07-30 at 12:12 +0300, Avi Kivity wrote:
> Rusty Russell wrote:
> > If we use a kmem cache for allocating vcpus, we can get the 16-byte
> > alignment required by fxsave & fxrstor instructions, and avoid
> > manually aligning the buffer.
>
> Please separate the slabification from the fpu cleanups into separate
> patches.
Hmm, are you trying to get my patch count up?
> > static int __init svm_init(void)
> > {
> > - return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
> > + int err;
> > +
> > + /* A kmem cache lets us meet the alignment requirements of fx_save. */
> > + svm_kmem_cache = KMEM_CACHE(vcpu_svm, 0);
> >
>
> kvm slab caches are prefixed with kvm_; this makes it easy to 'grep kvm
> /proc/slabinfo' to see kvm leaks. Please preserve that.
How horrible. We'd be better with KMEM_CACHE() prefixing the module
name. I'll change this to use the manual "non-recommended" way for the
moment and submit a patch later.
> Also, the allocation and destruction can be in common code, reducing the
> amount of rarely-executed error paths.
Sure, we could pass the size into the core so it can allocate the slab
cache, then have the core do the allocation itself (undoing the recent
create_vcpu change).
Is it worth it?
Rusty.
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 1/2] svm de-containization
[not found] ` <46ADA971.9030406-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-07-30 10:07 ` Rusty Russell
[not found] ` <1185790028.6131.32.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 10:07 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
container_of is wonderful, but not casting at all is better. This
patch changes svm.c's internal functions to pass "struct vcpu_svm"
instead of "struct kvm_vcpu" and using container_of.
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r 5ee78d43165b drivers/kvm/svm.c
--- a/drivers/kvm/svm.c Mon Jul 30 14:26:09 2007 +1000
+++ b/drivers/kvm/svm.c Mon Jul 30 19:36:09 2007 +1000
@@ -98,9 +98,9 @@ static inline u32 svm_has(u32 feat)
return svm_features & feat;
}
-static unsigned get_addr_size(struct kvm_vcpu *vcpu)
-{
- struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
+static unsigned get_addr_size(struct vcpu_svm *svm)
+{
+ struct vmcb_save_area *sa = &svm->vmcb->save;
u16 cs_attrib;
if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
@@ -867,17 +867,15 @@ static void save_host_msrs(struct kvm_vc
#endif
}
-static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+{
if (svm_data->next_asid > svm_data->max_asid) {
++svm_data->asid_generation;
svm_data->next_asid = 1;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
- vcpu->cpu = svm_data->cpu;
+ svm->vcpu.cpu = svm_data->cpu;
svm->asid_generation = svm_data->asid_generation;
svm->vmcb->control.asid = svm_data->next_asid++;
}
@@ -931,42 +929,43 @@ static void svm_set_dr(struct kvm_vcpu *
}
}
-static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
u32 exit_int_info = svm->vmcb->control.exit_int_info;
+ struct kvm *kvm = svm->vcpu.kvm;
u64 fault_address;
u32 error_code;
enum emulation_result er;
int r;
if (is_external_interrupt(exit_int_info))
- push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
-
- spin_lock(&vcpu->kvm->lock);
+ push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+
+ spin_lock(&kvm->lock);
fault_address = svm->vmcb->control.exit_info_2;
error_code = svm->vmcb->control.exit_info_1;
- r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+ r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
if (r < 0) {
- spin_unlock(&vcpu->kvm->lock);
+ spin_unlock(&kvm->lock);
return r;
}
if (!r) {
- spin_unlock(&vcpu->kvm->lock);
+ spin_unlock(&kvm->lock);
return 1;
}
- er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
- spin_unlock(&vcpu->kvm->lock);
+ er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
+ error_code);
+ spin_unlock(&kvm->lock);
switch (er) {
case EMULATE_DONE:
return 1;
case EMULATE_DO_MMIO:
- ++vcpu->stat.mmio_exits;
+ ++svm->vcpu.stat.mmio_exits;
return 0;
case EMULATE_FAIL:
- vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+ vcpu_printf(&svm->vcpu, "%s: emulate fail\n", __FUNCTION__);
break;
default:
BUG();
@@ -976,21 +975,18 @@ static int pf_interception(struct kvm_vc
return 0;
}
-static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
- if (!(vcpu->cr0 & X86_CR0_TS))
+ if (!(svm->vcpu.cr0 & X86_CR0_TS))
svm->vmcb->save.cr0 &= ~X86_CR0_TS;
- vcpu->fpu_active = 1;
+ svm->vcpu.fpu_active = 1;
return 1;
}
-static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
/*
* VMCB is undefined after a SHUTDOWN intercept
* so reinitialize it.
@@ -1002,11 +998,10 @@ static int shutdown_interception(struct
return 0;
}
-static int io_get_override(struct kvm_vcpu *vcpu,
+static int io_get_override(struct vcpu_svm *svm,
struct vmcb_seg **seg,
int *addr_override)
{
- struct vcpu_svm *svm = to_svm(vcpu);
u8 inst[MAX_INST_SIZE];
unsigned ins_length;
gva_t rip;
@@ -1026,7 +1021,7 @@ static int io_get_override(struct kvm_vc
svm->vmcb->control.exit_info_2,
ins_length);
- if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
+ if (kvm_read_guest(&svm->vcpu, rip, ins_length, inst) != ins_length)
/* #PF */
return 0;
@@ -1067,28 +1062,27 @@ static int io_get_override(struct kvm_vc
return 0;
}
-static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
+static unsigned long io_adress(struct vcpu_svm *svm, int ins, gva_t *address)
{
unsigned long addr_mask;
unsigned long *reg;
struct vmcb_seg *seg;
int addr_override;
- struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_save_area *save_area = &svm->vmcb->save;
u16 cs_attrib = save_area->cs.attrib;
- unsigned addr_size = get_addr_size(vcpu);
-
- if (!io_get_override(vcpu, &seg, &addr_override))
+ unsigned addr_size = get_addr_size(svm);
+
+ if (!io_get_override(svm, &seg, &addr_override))
return 0;
if (addr_override)
addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
if (ins) {
- reg = &vcpu->regs[VCPU_REGS_RDI];
+ reg = &svm->vcpu.regs[VCPU_REGS_RDI];
seg = &svm->vmcb->save.es;
} else {
- reg = &vcpu->regs[VCPU_REGS_RSI];
+ reg = &svm->vcpu.regs[VCPU_REGS_RSI];
seg = (seg) ? seg : &svm->vmcb->save.ds;
}
@@ -1101,7 +1095,7 @@ static unsigned long io_adress(struct kv
}
if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
- svm_inject_gp(vcpu, 0);
+ svm_inject_gp(&svm->vcpu, 0);
return 0;
}
@@ -1109,16 +1103,15 @@ static unsigned long io_adress(struct kv
return addr_mask;
}
-static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
int size, down, in, string, rep;
unsigned port;
unsigned long count;
gva_t address = 0;
- ++vcpu->stat.io_exits;
+ ++svm->vcpu.stat.io_exits;
svm->next_rip = svm->vmcb->control.exit_info_2;
@@ -1133,7 +1126,7 @@ static int io_interception(struct kvm_vc
if (string) {
unsigned addr_mask;
- addr_mask = io_adress(vcpu, in, &address);
+ addr_mask = io_adress(svm, in, &address);
if (!addr_mask) {
printk(KERN_DEBUG "%s: get io address failed\n",
__FUNCTION__);
@@ -1141,60 +1134,57 @@ static int io_interception(struct kvm_vc
}
if (rep)
- count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
- }
- return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
- address, rep, port);
-}
-
-static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ count = svm->vcpu.regs[VCPU_REGS_RCX] & addr_mask;
+ }
+ return kvm_setup_pio(&svm->vcpu, kvm_run, in, size, count, string,
+ down, address, rep, port);
+}
+
+static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
return 1;
}
-static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->next_rip = svm->vmcb->save.rip + 1;
- skip_emulated_instruction(vcpu);
- return kvm_emulate_halt(vcpu);
-}
-
-static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+ skip_emulated_instruction(&svm->vcpu);
+ return kvm_emulate_halt(&svm->vcpu);
+}
+
+static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->next_rip = svm->vmcb->save.rip + 3;
- skip_emulated_instruction(vcpu);
- return kvm_hypercall(vcpu, kvm_run);
-}
-
-static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- inject_ud(vcpu);
+ skip_emulated_instruction(&svm->vcpu);
+ return kvm_hypercall(&svm->vcpu, kvm_run);
+}
+
+static int invalid_op_interception(struct vcpu_svm *svm,
+ struct kvm_run *kvm_run)
+{
+ inject_ud(&svm->vcpu);
return 1;
}
-static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int task_switch_interception(struct vcpu_svm *svm,
+ struct kvm_run *kvm_run)
{
printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
return 0;
}
-static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
+static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
svm->next_rip = svm->vmcb->save.rip + 2;
- kvm_emulate_cpuid(vcpu);
+ kvm_emulate_cpuid(&svm->vcpu);
return 1;
}
-static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
+static int emulate_on_interception(struct vcpu_svm *svm,
+ struct kvm_run *kvm_run)
+{
+ if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
printk(KERN_ERR "%s: failed\n", __FUNCTION__);
return 1;
}
@@ -1243,19 +1233,18 @@ static int svm_get_msr(struct kvm_vcpu *
return 0;
}
-static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
u64 data;
- if (svm_get_msr(vcpu, ecx, &data))
- svm_inject_gp(vcpu, 0);
+ if (svm_get_msr(&svm->vcpu, ecx, &data))
+ svm_inject_gp(&svm->vcpu, 0);
else {
svm->vmcb->save.rax = data & 0xffffffff;
- vcpu->regs[VCPU_REGS_RDX] = data >> 32;
+ svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
svm->next_rip = svm->vmcb->save.rip + 2;
- skip_emulated_instruction(vcpu);
+ skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
@@ -1304,29 +1293,28 @@ static int svm_set_msr(struct kvm_vcpu *
return 0;
}
-static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
u64 data = (svm->vmcb->save.rax & -1u)
- | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+ | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
svm->next_rip = svm->vmcb->save.rip + 2;
- if (svm_set_msr(vcpu, ecx, data))
- svm_inject_gp(vcpu, 0);
+ if (svm_set_msr(&svm->vcpu, ecx, data))
+ svm_inject_gp(&svm->vcpu, 0);
else
- skip_emulated_instruction(vcpu);
+ skip_emulated_instruction(&svm->vcpu);
return 1;
}
-static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- if (to_svm(vcpu)->vmcb->control.exit_info_1)
- return wrmsr_interception(vcpu, kvm_run);
+static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ if (svm->vmcb->control.exit_info_1)
+ return wrmsr_interception(svm, kvm_run);
else
- return rdmsr_interception(vcpu, kvm_run);
-}
-
-static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+ return rdmsr_interception(svm, kvm_run);
+}
+
+static int interrupt_window_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
/*
@@ -1334,8 +1322,8 @@ static int interrupt_window_interception
* possible
*/
if (kvm_run->request_interrupt_window &&
- !vcpu->irq_summary) {
- ++vcpu->stat.irq_window_exits;
+ !svm->vcpu.irq_summary) {
+ ++svm->vcpu.stat.irq_window_exits;
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
}
@@ -1343,7 +1331,7 @@ static int interrupt_window_interception
return 1;
}
-static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
+static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
struct kvm_run *kvm_run) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
[SVM_EXIT_READ_CR3] = emulate_on_interception,
@@ -1390,9 +1378,8 @@ static int (*svm_exit_handlers[])(struct
};
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
u32 exit_code = svm->vmcb->control.exit_code;
if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
@@ -1409,7 +1396,7 @@ static int handle_exit(struct kvm_vcpu *
return 0;
}
- return svm_exit_handlers[exit_code](vcpu, kvm_run);
+ return svm_exit_handlers[exit_code](svm, kvm_run);
}
static void reload_tss(struct kvm_vcpu *vcpu)
@@ -1421,80 +1408,77 @@ static void reload_tss(struct kvm_vcpu *
load_TR_desc();
}
-static void pre_svm_run(struct kvm_vcpu *vcpu)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+static void pre_svm_run(struct vcpu_svm *svm)
+{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
- if (vcpu->cpu != cpu ||
+ if (svm->vcpu.cpu != cpu ||
svm->asid_generation != svm_data->asid_generation)
- new_asid(vcpu, svm_data);
-}
-
-
-static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
+ new_asid(svm, svm_data);
+}
+
+
+static inline void kvm_do_inject_irq(struct vcpu_svm *svm)
{
struct vmcb_control_area *control;
- control = &to_svm(vcpu)->vmcb->control;
- control->int_vector = pop_irq(vcpu);
+ control = &svm->vmcb->control;
+ control->int_vector = pop_irq(&svm->vcpu);
control->int_ctl &= ~V_INTR_PRIO_MASK;
control->int_ctl |= V_IRQ_MASK |
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
-static void kvm_reput_irq(struct kvm_vcpu *vcpu)
-{
- struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+static void kvm_reput_irq(struct vcpu_svm *svm)
+{
+ struct vmcb_control_area *control = &svm->vmcb->control;
if (control->int_ctl & V_IRQ_MASK) {
control->int_ctl &= ~V_IRQ_MASK;
- push_irq(vcpu, control->int_vector);
- }
-
- vcpu->interrupt_window_open =
+ push_irq(&svm->vcpu, control->int_vector);
+ }
+
+ svm->vcpu.interrupt_window_open =
!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+static void do_interrupt_requests(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
- vcpu->interrupt_window_open =
+ svm->vcpu.interrupt_window_open =
(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
(svm->vmcb->save.rflags & X86_EFLAGS_IF));
- if (vcpu->interrupt_window_open && vcpu->irq_summary)
+ if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
/*
* If interrupts enabled, and not blocked by sti or mov ss. Good.
*/
- kvm_do_inject_irq(vcpu);
+ kvm_do_inject_irq(svm);
/*
* Interrupts blocked. Wait for unblock.
*/
- if (!vcpu->interrupt_window_open &&
- (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+ if (!svm->vcpu.interrupt_window_open &&
+ (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
control->intercept |= 1ULL << INTERCEPT_VINTR;
} else
control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+static void post_kvm_run_save(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
- vcpu->irq_summary == 0);
+ kvm_run->ready_for_interrupt_injection
+ = (svm->vcpu.interrupt_window_open &&
+ svm->vcpu.irq_summary == 0);
kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
- kvm_run->cr8 = vcpu->cr8;
- kvm_run->apic_base = vcpu->apic_base;
+ kvm_run->cr8 = svm->vcpu.cr8;
+ kvm_run->apic_base = svm->vcpu.apic_base;
}
/*
@@ -1503,13 +1487,13 @@ static void post_kvm_run_save(struct kvm
*
* No need to exit to userspace if we already have an interrupt queued.
*/
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+static int dm_request_for_irq_injection(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- return (!vcpu->irq_summary &&
+ return (!svm->vcpu.irq_summary &&
kvm_run->request_interrupt_window &&
- vcpu->interrupt_window_open &&
- (to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
+ svm->vcpu.interrupt_window_open &&
+ (svm->vmcb->save.rflags & X86_EFLAGS_IF));
}
static void save_db_regs(unsigned long *db_regs)
@@ -1547,7 +1531,7 @@ again:
return r;
if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
+ do_interrupt_requests(svm, kvm_run);
clgi();
@@ -1556,7 +1540,7 @@ again:
if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
svm_flush_tlb(vcpu);
- pre_svm_run(vcpu);
+ pre_svm_run(svm);
save_host_msrs(vcpu);
fs_selector = read_fs();
@@ -1716,7 +1700,7 @@ again:
stgi();
- kvm_reput_irq(vcpu);
+ kvm_reput_irq(svm);
svm->next_rip = 0;
@@ -1724,29 +1708,29 @@ again:
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= svm->vmcb->control.exit_code;
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
return 0;
}
- r = handle_exit(vcpu, kvm_run);
+ r = handle_exit(svm, kvm_run);
if (r > 0) {
if (signal_pending(current)) {
++vcpu->stat.signal_exits;
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
kvm_run->exit_reason = KVM_EXIT_INTR;
return -EINTR;
}
- if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+ if (dm_request_for_irq_injection(svm, kvm_run)) {
++vcpu->stat.request_irq_exits;
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
kvm_run->exit_reason = KVM_EXIT_INTR;
return -EINTR;
}
kvm_resched(vcpu);
goto again;
}
- post_kvm_run_save(vcpu, kvm_run);
+ post_kvm_run_save(svm, kvm_run);
return r;
}
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 2/2] svm internal function name cleanup
[not found] ` <1185790028.6131.32.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 10:08 ` Rusty Russell
2007-07-30 10:22 ` [PATCH 1/2] svm de-containization Avi Kivity
1 sibling, 0 replies; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 10:08 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
Changes some svm.c internal function names:
1) io_adress -> io_address (de-germanify the spelling)
2) kvm_reput_irq -> reput_irq (it's not a generic kvm function)
3) kvm_do_inject_irq -> inject_irq (it's not a generic kvm function)
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r 1bb631b5d298 drivers/kvm/svm.c
--- a/drivers/kvm/svm.c Mon Jul 30 19:40:07 2007 +1000
+++ b/drivers/kvm/svm.c Mon Jul 30 19:44:20 2007 +1000
@@ -1062,7 +1062,7 @@ static int io_get_override(struct vcpu_s
return 0;
}
-static unsigned long io_adress(struct vcpu_svm *svm, int ins, gva_t *address)
+static unsigned long io_address(struct vcpu_svm *svm, int ins, gva_t *address)
{
unsigned long addr_mask;
unsigned long *reg;
@@ -1126,7 +1126,7 @@ static int io_interception(struct vcpu_s
if (string) {
unsigned addr_mask;
- addr_mask = io_adress(svm, in, &address);
+ addr_mask = io_address(svm, in, &address);
if (!addr_mask) {
printk(KERN_DEBUG "%s: get io address failed\n",
__FUNCTION__);
@@ -1421,7 +1421,7 @@ static void pre_svm_run(struct vcpu_svm
}
-static inline void kvm_do_inject_irq(struct vcpu_svm *svm)
+static inline void inject_irq(struct vcpu_svm *svm)
{
struct vmcb_control_area *control;
@@ -1432,7 +1432,7 @@ static inline void kvm_do_inject_irq(str
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
-static void kvm_reput_irq(struct vcpu_svm *svm)
+static void reput_irq(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1458,7 +1458,7 @@ static void do_interrupt_requests(struct
/*
* If interrupts enabled, and not blocked by sti or mov ss. Good.
*/
- kvm_do_inject_irq(svm);
+ inject_irq(svm);
/*
* Interrupts blocked. Wait for unblock.
@@ -1700,7 +1700,7 @@ again:
stgi();
- kvm_reput_irq(svm);
+ reput_irq(svm);
svm->next_rip = 0;
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 1/2] svm de-containization
[not found] ` <1185790028.6131.32.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 10:08 ` [PATCH 2/2] svm internal function name cleanup Rusty Russell
@ 2007-07-30 10:22 ` Avi Kivity
1 sibling, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2007-07-30 10:22 UTC (permalink / raw)
To: Rusty Russell; +Cc: kvm-devel
Rusty Russell wrote:
> container_of is wonderful, but not casting at all is better. This
> patch changes svm.c's internal functions to pass "struct vcpu_svm"
> instead of "struct kvm_vcpu" and using container_of.
>
>
Applied both; thanks.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops
[not found] ` <1185789882.6131.30.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 10:26 ` Avi Kivity
[not found] ` <46ADBCBC.9050207-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
0 siblings, 1 reply; 16+ messages in thread
From: Avi Kivity @ 2007-07-30 10:26 UTC (permalink / raw)
To: Rusty Russell; +Cc: kvm-devel
Rusty Russell wrote:
> On Mon, 2007-07-30 at 12:12 +0300, Avi Kivity wrote:
>
>> Rusty Russell wrote:
>>
>>> If we use a kmem cache for allocating vcpus, we can get the 16-byte
>>> alignment required by fxsave & fxrstor instructions, and avoid
>>> manually aligning the buffer.
>>>
>> Please separate the slabification from the fpu cleanups into separate
>> patches.
>>
>
> Hmm, are you trying to get my patch count up?
>
>
I get a kickback on my signoffs.
>> Also, the allocation and destruction can be in common code, reducing the
>> amount of rarely-executed error paths.
>>
>
> Sure, we could pass the size into the core so it can allocate the slab
> cache, then have the core do the allocation itself (undoing the recent
> create_vcpu change).
>
> Is it worth it?
>
It isn't critical, but nice to have. I'd like to keep the code small.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 1/2] Use kmem cache for allocating vcpus
[not found] ` <46ADBCBC.9050207-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-07-30 11:12 ` Rusty Russell
[not found] ` <1185793939.6131.38.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 11:12 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
Avi wants the allocations of vcpus centralized again. The easiest way
is to add a "size" arg to kvm_init_arch, and expose the thus-prepared
cache to the modules.
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r 650abbc45974 drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h Mon Jul 30 20:49:49 2007 +1000
+++ b/drivers/kvm/kvm.h Mon Jul 30 21:01:41 2007 +1000
@@ -140,6 +140,7 @@ struct kvm_mmu_page {
};
struct kvm_vcpu;
+extern struct kmem_cache *kvm_vcpu_cache;
/*
* x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
@@ -481,7 +482,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
-int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
+int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
+ struct module *module);
void kvm_exit_arch(void);
int kvm_mmu_module_init(void);
diff -r 650abbc45974 drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c Mon Jul 30 20:49:49 2007 +1000
+++ b/drivers/kvm/kvm_main.c Mon Jul 30 21:05:13 2007 +1000
@@ -53,6 +53,8 @@ static cpumask_t cpus_hardware_enabled;
static cpumask_t cpus_hardware_enabled;
struct kvm_arch_ops *kvm_arch_ops;
+struct kmem_cache *kvm_vcpu_cache;
+EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
@@ -3142,7 +3144,8 @@ static struct sys_device kvm_sysdev = {
hpa_t bad_page_address;
-int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
+int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
+ struct module *module)
{
int r;
@@ -3180,6 +3183,14 @@ int kvm_init_arch(struct kvm_arch_ops *o
if (r)
goto out_free_3;
+ /* A kmem cache lets us meet the alignment requirements of fx_save. */
+ kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
+ __alignof__(struct kvm_vcpu), 0, 0);
+ if (!kvm_vcpu_cache) {
+ r = -ENOMEM;
+ goto out_free_4;
+ }
+
kvm_chardev_ops.owner = module;
r = misc_register(&kvm_dev);
@@ -3191,6 +3202,8 @@ int kvm_init_arch(struct kvm_arch_ops *o
return r;
out_free:
+ kmem_cache_destroy(kvm_vcpu_cache);
+out_free_4:
sysdev_unregister(&kvm_sysdev);
out_free_3:
sysdev_class_unregister(&kvm_sysdev_class);
@@ -3208,6 +3221,7 @@ void kvm_exit_arch(void)
void kvm_exit_arch(void)
{
misc_deregister(&kvm_dev);
+ kmem_cache_destroy(kvm_vcpu_cache);
sysdev_unregister(&kvm_sysdev);
sysdev_class_unregister(&kvm_sysdev_class);
unregister_reboot_notifier(&kvm_reboot_notifier);
diff -r 650abbc45974 drivers/kvm/svm.c
--- a/drivers/kvm/svm.c Mon Jul 30 20:49:49 2007 +1000
+++ b/drivers/kvm/svm.c Mon Jul 30 21:00:05 2007 +1000
@@ -559,7 +559,7 @@ static struct kvm_vcpu *svm_create_vcpu(
struct page *page;
int err;
- svm = kzalloc(sizeof *svm, GFP_KERNEL);
+ svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!svm) {
err = -ENOMEM;
goto out;
@@ -1832,7 +1832,8 @@ static struct kvm_arch_ops svm_arch_ops
static int __init svm_init(void)
{
- return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
+ return kvm_init_arch(&svm_arch_ops, sizeof(struct vcpu_svm),
+ THIS_MODULE);
}
static void __exit svm_exit(void)
diff -r 650abbc45974 drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c Mon Jul 30 20:49:49 2007 +1000
+++ b/drivers/kvm/vmx.c Mon Jul 30 21:08:52 2007 +1000
@@ -2295,7 +2295,7 @@ static struct kvm_vcpu *vmx_create_vcpu(
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
- struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
+ struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vmx)
return ERR_PTR(-ENOMEM);
@@ -2417,7 +2417,7 @@ static int __init vmx_init(void)
memset(iova, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
- r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+ r = kvm_init_arch(&vmx_arch_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
goto out1;
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 2/2] Use alignment properties of vcpu to simplify FPU ops
[not found] ` <1185793939.6131.38.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 11:13 ` Rusty Russell
[not found] ` <1185794023.6131.41.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-07-30 11:13 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
Now we use a kmem cache for allocating vcpus, we can get the 16-byte
alignment required by fxsave & fxrstor instructions, and avoid
manually aligning the buffer.
Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
diff -r 39421cbc0486 drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h Mon Jul 30 15:01:44 2007 +1000
+++ b/drivers/kvm/kvm.h Mon Jul 30 15:09:54 2007 +1000
@@ -43,10 +43,6 @@
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
-
-#define FX_IMAGE_SIZE 512
-#define FX_IMAGE_ALIGN 16
-#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
#define DE_VECTOR 0
#define NM_VECTOR 7
@@ -339,9 +335,8 @@ struct kvm_vcpu {
struct kvm_guest_debug guest_debug;
- char fx_buf[FX_BUF_SIZE];
- char *host_fx_image;
- char *guest_fx_image;
+ struct i387_fxsave_struct host_fx_image;
+ struct i387_fxsave_struct guest_fx_image;
int fpu_active;
int guest_fpu_loaded;
@@ -697,12 +692,12 @@ static inline unsigned long read_msr(uns
}
#endif
-static inline void fx_save(void *image)
+static inline void fx_save(struct i387_fxsave_struct *image)
{
asm ("fxsave (%0)":: "r" (image));
}
-static inline void fx_restore(void *image)
+static inline void fx_restore(struct i387_fxsave_struct *image)
{
asm ("fxrstor (%0)":: "r" (image));
}
diff -r 39421cbc0486 drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c Mon Jul 30 15:01:44 2007 +1000
+++ b/drivers/kvm/kvm_main.c Mon Jul 30 15:10:09 2007 +1000
@@ -218,8 +218,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu
return;
vcpu->guest_fpu_loaded = 1;
- fx_save(vcpu->host_fx_image);
- fx_restore(vcpu->guest_fx_image);
+ fx_save(&vcpu->host_fx_image);
+ fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
@@ -229,8 +229,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *
return;
vcpu->guest_fpu_loaded = 0;
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
@@ -317,10 +317,6 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
goto fail_free_run;
}
vcpu->pio_data = page_address(page);
-
- vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
- FX_IMAGE_ALIGN);
- vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
r = kvm_mmu_create(vcpu);
if (r < 0)
@@ -672,30 +668,20 @@ EXPORT_SYMBOL_GPL(set_cr8);
void fx_init(struct kvm_vcpu *vcpu)
{
- struct __attribute__ ((__packed__)) fx_image_s {
- u16 control; //fcw
- u16 status; //fsw
- u16 tag; // ftw
- u16 opcode; //fop
- u64 ip; // fpu ip
- u64 operand;// fpu dp
- u32 mxcsr;
- u32 mxcsr_mask;
-
- } *fx_image;
+ unsigned after_mxcsr_mask;
/* Initialize guest FPU by resetting ours and saving into guest's */
preempt_disable();
- fx_save(vcpu->host_fx_image);
+ fx_save(&vcpu->host_fx_image);
fpu_init();
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
preempt_enable();
- fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
- fx_image->mxcsr = 0x1f80;
- memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
- 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
+ after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+ vcpu->guest_fx_image.mxcsr = 0x1f80;
+ memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+ 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
@@ -2414,6 +2400,9 @@ static int kvm_vm_ioctl_create_vcpu(stru
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
+ /* We do fxsave: this must be aligned. */
+ BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
vcpu_load(vcpu);
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
@@ -2526,7 +2515,7 @@ struct fxsave {
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+ struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
vcpu_load(vcpu);
@@ -2546,7 +2535,7 @@ static int kvm_vcpu_ioctl_get_fpu(struct
static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+ struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
vcpu_load(vcpu);
diff -r 39421cbc0486 drivers/kvm/svm.c
--- a/drivers/kvm/svm.c Mon Jul 30 15:01:44 2007 +1000
+++ b/drivers/kvm/svm.c Mon Jul 30 15:09:54 2007 +1000
@@ -1540,8 +1541,8 @@ again:
}
if (vcpu->fpu_active) {
- fx_save(vcpu->host_fx_image);
- fx_restore(vcpu->guest_fx_image);
+ fx_save(&vcpu->host_fx_image);
+ fx_restore(&vcpu->guest_fx_image);
}
asm volatile (
@@ -1653,8 +1654,8 @@ again:
vcpu->guest_mode = 0;
if (vcpu->fpu_active) {
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
}
if ((svm->vmcb->save.dr7 & 0xff))
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 2/2] Use alignment properties of vcpu to simplify FPU ops
[not found] ` <1185794023.6131.41.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2007-07-30 13:09 ` Avi Kivity
0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2007-07-30 13:09 UTC (permalink / raw)
To: Rusty Russell; +Cc: kvm-devel
Rusty Russell wrote:
> Now we use a kmem cache for allocating vcpus, we can get the 16-byte
> alignment required by fxsave & fxrstor instructions, and avoid
> manually aligning the buffer.
>
>
Thanks -- applied both.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 16+ messages in thread
end of thread, other threads:[~2007-07-30 13:09 UTC | newest]
Thread overview: 16+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-07-30 6:31 [PATCH 1/4] vmx: pass vcpu_vmx internally Rusty Russell
[not found] ` <1185777103.12151.147.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 6:32 ` [PATCH 2/4] svm: pass vcpu_svm internally Rusty Russell
[not found] ` <1185777179.12151.149.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 6:36 ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
[not found] ` <1185777368.12151.152.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 6:39 ` [PATCH 4/4] Use kmem cache for allocating vcpus, simplify FPU ops Rusty Russell
[not found] ` <1185777542.12151.156.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 9:12 ` Avi Kivity
[not found] ` <46ADAB67.5070805-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 10:04 ` Rusty Russell
[not found] ` <1185789882.6131.30.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 10:26 ` Avi Kivity
[not found] ` <46ADBCBC.9050207-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 11:12 ` [PATCH 1/2] Use kmem cache for allocating vcpus Rusty Russell
[not found] ` <1185793939.6131.38.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 11:13 ` [PATCH 2/2] Use alignment properties of vcpu to simplify FPU ops Rusty Russell
[not found] ` <1185794023.6131.41.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 13:09 ` Avi Kivity
2007-07-30 6:39 ` [PATCH 3/4] House svm.c's pop_irq and push_irq helpers into generic header Rusty Russell
2007-07-30 9:03 ` [PATCH 2/4] svm: pass vcpu_svm internally Avi Kivity
[not found] ` <46ADA971.9030406-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-30 10:07 ` [PATCH 1/2] svm de-containization Rusty Russell
[not found] ` <1185790028.6131.32.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-30 10:08 ` [PATCH 2/2] svm internal function name cleanup Rusty Russell
2007-07-30 10:22 ` [PATCH 1/2] svm de-containization Avi Kivity
2007-07-30 9:02 ` [PATCH 1/4] vmx: pass vcpu_vmx internally Avi Kivity
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox