* [PATCH 01/21] kvm/svm: add helper functions for global interrupt flag
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 02/21] kvm/svm: optimize nested #vmexit Joerg Roedel
` (20 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This patch makes the code easier to read when it comes to setting,
clearing and checking the status of the virtualized global
interrupt flag for the VCPU.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 33 +++++++++++++++++++++++++--------
1 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 70e81f5..8eb0852 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -130,6 +130,21 @@ static inline bool is_nested(struct vcpu_svm *svm)
return svm->nested_vmcb;
}
+static inline void enable_gif(struct vcpu_svm *svm)
+{
+ svm->vcpu.arch.hflags |= HF_GIF_MASK;
+}
+
+static inline void disable_gif(struct vcpu_svm *svm)
+{
+ svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+}
+
+static inline bool gif_set(struct vcpu_svm *svm)
+{
+ return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
+}
+
static unsigned long iopm_base;
struct kvm_ldttss_desc {
@@ -624,7 +639,9 @@ static void init_vmcb(struct vcpu_svm *svm)
force_new_asid(&svm->vcpu);
svm->nested_vmcb = 0;
- svm->vcpu.arch.hflags = HF_GIF_MASK;
+ svm->vcpu.arch.hflags = 0;
+
+ enable_gif(svm);
}
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -1632,7 +1649,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
- svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+ disable_gif(svm);
/* Exit nested SVM mode */
svm->nested_vmcb = 0;
@@ -1764,7 +1781,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
- svm->vcpu.arch.hflags |= HF_GIF_MASK;
+ enable_gif(svm);
return 0;
}
@@ -1853,7 +1870,7 @@ static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- svm->vcpu.arch.hflags |= HF_GIF_MASK;
+ enable_gif(svm);
return 1;
}
@@ -1866,7 +1883,7 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+ disable_gif(svm);
/* After a CLGI no interrupts should come */
svm_clear_vintr(svm);
@@ -2353,7 +2370,7 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- BUG_ON(!(svm->vcpu.arch.hflags & HF_GIF_MASK));
+ BUG_ON(!(gif_set(svm)));
svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
@@ -2384,7 +2401,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
struct vmcb *vmcb = svm->vmcb;
return (vmcb->save.rflags & X86_EFLAGS_IF) &&
!(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
- (svm->vcpu.arch.hflags & HF_GIF_MASK) &&
+ gif_set(svm) &&
!is_nested(svm);
}
@@ -2399,7 +2416,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
* GIF becomes 1, because that's a separate STGI/VMRUN intercept.
* The next time we get that intercept, this function will be
* called again though and we'll get the vintr intercept. */
- if (svm->vcpu.arch.hflags & HF_GIF_MASK) {
+ if (gif_set(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
}
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 02/21] kvm/svm: optimize nested #vmexit
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
2009-08-07 9:49 ` [PATCH 01/21] kvm/svm: add helper functions for global interrupt flag Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 03/21] kvm/svm: optimize nested vmrun Joerg Roedel
` (19 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
It is more efficient to copy only the relevant parts of the vmcb back to
the nested vmcb when we emulate a vmexit.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
arch/x86/kvm/svm.c | 68 +++++++++++++++++++++++++--------------------------
1 files changed, 33 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8eb0852..02ebc3f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1575,53 +1575,52 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
{
struct vmcb *nested_vmcb = (struct vmcb *)arg1;
struct vmcb *hsave = svm->hsave;
- u64 nested_save[] = { nested_vmcb->save.cr0,
- nested_vmcb->save.cr3,
- nested_vmcb->save.cr4,
- nested_vmcb->save.efer,
- nested_vmcb->control.intercept_cr_read,
- nested_vmcb->control.intercept_cr_write,
- nested_vmcb->control.intercept_dr_read,
- nested_vmcb->control.intercept_dr_write,
- nested_vmcb->control.intercept_exceptions,
- nested_vmcb->control.intercept,
- nested_vmcb->control.msrpm_base_pa,
- nested_vmcb->control.iopm_base_pa,
- nested_vmcb->control.tsc_offset };
+ struct vmcb *vmcb = svm->vmcb;
/* Give the current vmcb to the guest */
- memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb));
- nested_vmcb->save.cr0 = nested_save[0];
- if (!npt_enabled)
- nested_vmcb->save.cr3 = nested_save[1];
- nested_vmcb->save.cr4 = nested_save[2];
- nested_vmcb->save.efer = nested_save[3];
- nested_vmcb->control.intercept_cr_read = nested_save[4];
- nested_vmcb->control.intercept_cr_write = nested_save[5];
- nested_vmcb->control.intercept_dr_read = nested_save[6];
- nested_vmcb->control.intercept_dr_write = nested_save[7];
- nested_vmcb->control.intercept_exceptions = nested_save[8];
- nested_vmcb->control.intercept = nested_save[9];
- nested_vmcb->control.msrpm_base_pa = nested_save[10];
- nested_vmcb->control.iopm_base_pa = nested_save[11];
- nested_vmcb->control.tsc_offset = nested_save[12];
+ disable_gif(svm);
+
+ nested_vmcb->save.es = vmcb->save.es;
+ nested_vmcb->save.cs = vmcb->save.cs;
+ nested_vmcb->save.ss = vmcb->save.ss;
+ nested_vmcb->save.ds = vmcb->save.ds;
+ nested_vmcb->save.gdtr = vmcb->save.gdtr;
+ nested_vmcb->save.idtr = vmcb->save.idtr;
+ if (npt_enabled)
+ nested_vmcb->save.cr3 = vmcb->save.cr3;
+ nested_vmcb->save.cr2 = vmcb->save.cr2;
+ nested_vmcb->save.rflags = vmcb->save.rflags;
+ nested_vmcb->save.rip = vmcb->save.rip;
+ nested_vmcb->save.rsp = vmcb->save.rsp;
+ nested_vmcb->save.rax = vmcb->save.rax;
+ nested_vmcb->save.dr7 = vmcb->save.dr7;
+ nested_vmcb->save.dr6 = vmcb->save.dr6;
+ nested_vmcb->save.cpl = vmcb->save.cpl;
+
+ nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
+ nested_vmcb->control.int_vector = vmcb->control.int_vector;
+ nested_vmcb->control.int_state = vmcb->control.int_state;
+ nested_vmcb->control.exit_code = vmcb->control.exit_code;
+ nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
+ nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
+ nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
+ nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
+ nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
+ nested_vmcb->control.tlb_ctl = 0;
+ nested_vmcb->control.event_inj = 0;
+ nested_vmcb->control.event_inj_err = 0;
/* We always set V_INTR_MASKING and remember the old value in hflags */
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
- if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) &&
- (nested_vmcb->control.int_vector)) {
- nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n",
- nested_vmcb->control.int_vector);
- }
-
/* Restore the original control entries */
svm->vmcb->control = hsave->control;
/* Kill any pending exceptions */
if (svm->vcpu.arch.exception.pending == true)
nsvm_printk("WARNING: Pending Exception\n");
+
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
@@ -1649,7 +1648,6 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
- disable_gif(svm);
/* Exit nested SVM mode */
svm->nested_vmcb = 0;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 03/21] kvm/svm: optimize nested vmrun
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
2009-08-07 9:49 ` [PATCH 01/21] kvm/svm: add helper functions for global interrupt flag Joerg Roedel
2009-08-07 9:49 ` [PATCH 02/21] kvm/svm: optimize nested #vmexit Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 04/21] kvm/svm: copy only necessary parts of the control area on vmrun/vmexit Joerg Roedel
` (18 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
Only copy the necessary parts of the vmcb save area on vmrun and save
precious time.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
arch/x86/kvm/svm.c | 28 +++++++++++++++++++++-------
1 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 02ebc3f..d4011cc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1684,6 +1684,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
{
struct vmcb *nested_vmcb = (struct vmcb *)arg1;
struct vmcb *hsave = svm->hsave;
+ struct vmcb *vmcb = svm->vmcb;
/* nested_vmcb is our indicator if nested SVM is activated */
svm->nested_vmcb = svm->vmcb->save.rax;
@@ -1694,12 +1695,25 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
/* Save the old vmcb, so we don't need to pick what we save, but
can restore everything when a VMEXIT occurs */
- memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
- /* We need to remember the original CR3 in the SPT case */
- if (!npt_enabled)
- hsave->save.cr3 = svm->vcpu.arch.cr3;
- hsave->save.cr4 = svm->vcpu.arch.cr4;
- hsave->save.rip = svm->next_rip;
+ hsave->save.es = vmcb->save.es;
+ hsave->save.cs = vmcb->save.cs;
+ hsave->save.ss = vmcb->save.ss;
+ hsave->save.ds = vmcb->save.ds;
+ hsave->save.gdtr = vmcb->save.gdtr;
+ hsave->save.idtr = vmcb->save.idtr;
+ hsave->save.efer = svm->vcpu.arch.shadow_efer;
+ hsave->save.cr0 = svm->vcpu.arch.cr0;
+ hsave->save.cr4 = svm->vcpu.arch.cr4;
+ hsave->save.rflags = vmcb->save.rflags;
+ hsave->save.rip = svm->next_rip;
+ hsave->save.rsp = vmcb->save.rsp;
+ hsave->save.rax = vmcb->save.rax;
+ if (npt_enabled)
+ hsave->save.cr3 = vmcb->save.cr3;
+ else
+ hsave->save.cr3 = svm->vcpu.arch.cr3;
+
+ hsave->control = vmcb->control;
if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
@@ -1724,7 +1738,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
kvm_mmu_reset_context(&svm->vcpu);
}
- svm->vmcb->save.cr2 = nested_vmcb->save.cr2;
+ svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 04/21] kvm/svm: copy only necessary parts of the control area on vmrun/vmexit
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (2 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 03/21] kvm/svm: optimize nested vmrun Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 05/21] kvm/svm: complete interrupts after handling nested exits Joerg Roedel
` (17 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
The vmcb control area contains more than 800 bytes of reserved fields
which are unnecessarily copied. Fix this by introducing a copy
function which only copies the relevant part and saves time.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
arch/x86/kvm/svm.c | 36 ++++++++++++++++++++++++++++++++++--
1 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d4011cc..e656425 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1570,6 +1570,38 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
nested_svm_exit_handled_real);
}
+static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
+{
+ struct vmcb_control_area *dst = &dst_vmcb->control;
+ struct vmcb_control_area *from = &from_vmcb->control;
+
+ dst->intercept_cr_read = from->intercept_cr_read;
+ dst->intercept_cr_write = from->intercept_cr_write;
+ dst->intercept_dr_read = from->intercept_dr_read;
+ dst->intercept_dr_write = from->intercept_dr_write;
+ dst->intercept_exceptions = from->intercept_exceptions;
+ dst->intercept = from->intercept;
+ dst->iopm_base_pa = from->iopm_base_pa;
+ dst->msrpm_base_pa = from->msrpm_base_pa;
+ dst->tsc_offset = from->tsc_offset;
+ dst->asid = from->asid;
+ dst->tlb_ctl = from->tlb_ctl;
+ dst->int_ctl = from->int_ctl;
+ dst->int_vector = from->int_vector;
+ dst->int_state = from->int_state;
+ dst->exit_code = from->exit_code;
+ dst->exit_code_hi = from->exit_code_hi;
+ dst->exit_info_1 = from->exit_info_1;
+ dst->exit_info_2 = from->exit_info_2;
+ dst->exit_int_info = from->exit_int_info;
+ dst->exit_int_info_err = from->exit_int_info_err;
+ dst->nested_ctl = from->nested_ctl;
+ dst->event_inj = from->event_inj;
+ dst->event_inj_err = from->event_inj_err;
+ dst->nested_cr3 = from->nested_cr3;
+ dst->lbr_ctl = from->lbr_ctl;
+}
+
static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
void *arg2, void *opaque)
{
@@ -1615,7 +1647,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
/* Restore the original control entries */
- svm->vmcb->control = hsave->control;
+ copy_vmcb_control_area(vmcb, hsave);
/* Kill any pending exceptions */
if (svm->vcpu.arch.exception.pending == true)
@@ -1713,7 +1745,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
else
hsave->save.cr3 = svm->vcpu.arch.cr3;
- hsave->control = vmcb->control;
+ copy_vmcb_control_area(hsave, vmcb);
if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 05/21] kvm/svm: complete interrupts after handling nested exits
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (3 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 04/21] kvm/svm: copy only necessary parts of the control area on vmrun/vmexit Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 06/21] kvm/svm: move nested svm state into separate struct Joerg Roedel
` (16 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
The interrupt completion code must run after nested exits are handled
because interrupts or exceptions that were not injected may be handled
by the L1 guest first.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
arch/x86/kvm/svm.c | 5 +++--
1 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e656425..b51d288 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -112,6 +112,7 @@ static int nested = 0;
module_param(nested, int, S_IRUGO);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
+static void svm_complete_interrupts(struct vcpu_svm *svm);
static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
static int nested_svm_vmexit(struct vcpu_svm *svm);
@@ -2325,6 +2326,8 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
}
+ svm_complete_interrupts(svm);
+
if (npt_enabled) {
int mmu_reload = 0;
if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -2691,8 +2694,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
}
-
- svm_complete_interrupts(svm);
}
#undef R
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 06/21] kvm/svm: move nested svm state into separate struct
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (4 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 05/21] kvm/svm: complete interrupts after handling nested exits Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 07/21] kvm/svm: cache nested intercepts Joerg Roedel
` (15 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This makes it clearer what purpose these members of the vcpu_svm
struct serve.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
arch/x86/kvm/svm.c | 62 +++++++++++++++++++++++++++------------------------
1 files changed, 33 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b51d288..b4e2587 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -70,6 +70,18 @@ static const u32 host_save_user_msrs[] = {
struct kvm_vcpu;
+struct nested_state {
+ struct vmcb *hsave;
+ u64 hsave_msr;
+ u64 vmcb;
+
+ /* These are the merged vectors */
+ u32 *msrpm;
+
+ /* gpa pointers to the real vectors */
+ u64 vmcb_msrpm;
+};
+
struct vcpu_svm {
struct kvm_vcpu vcpu;
struct vmcb *vmcb;
@@ -86,16 +98,8 @@ struct vcpu_svm {
u64 host_gs_base;
u32 *msrpm;
- struct vmcb *hsave;
- u64 hsave_msr;
-
- u64 nested_vmcb;
- /* These are the merged vectors */
- u32 *nested_msrpm;
-
- /* gpa pointers to the real vectors */
- u64 nested_vmcb_msrpm;
+ struct nested_state nested;
};
/* enable NPT for AMD64 and X86 with PAE */
@@ -128,7 +132,7 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
static inline bool is_nested(struct vcpu_svm *svm)
{
- return svm->nested_vmcb;
+ return svm->nested.vmcb;
}
static inline void enable_gif(struct vcpu_svm *svm)
@@ -639,7 +643,7 @@ static void init_vmcb(struct vcpu_svm *svm)
}
force_new_asid(&svm->vcpu);
- svm->nested_vmcb = 0;
+ svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
enable_gif(svm);
@@ -702,9 +706,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
hsave_page = alloc_page(GFP_KERNEL);
if (!hsave_page)
goto uninit;
- svm->hsave = page_address(hsave_page);
+ svm->nested.hsave = page_address(hsave_page);
- svm->nested_msrpm = page_address(nested_msrpm_pages);
+ svm->nested.msrpm = page_address(nested_msrpm_pages);
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
@@ -734,8 +738,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
- __free_page(virt_to_page(svm->hsave));
- __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
+ __free_page(virt_to_page(svm->nested.hsave));
+ __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
}
@@ -1561,13 +1565,13 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
switch (svm->vmcb->control.exit_code) {
case SVM_EXIT_MSR:
- return nested_svm_do(svm, svm->nested_vmcb,
- svm->nested_vmcb_msrpm, NULL,
+ return nested_svm_do(svm, svm->nested.vmcb,
+ svm->nested.vmcb_msrpm, NULL,
nested_svm_exit_handled_msr);
default: break;
}
- return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
+ return nested_svm_do(svm, svm->nested.vmcb, 0, &k,
nested_svm_exit_handled_real);
}
@@ -1607,7 +1611,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
void *arg2, void *opaque)
{
struct vmcb *nested_vmcb = (struct vmcb *)arg1;
- struct vmcb *hsave = svm->hsave;
+ struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
/* Give the current vmcb to the guest */
@@ -1682,7 +1686,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
svm->vmcb->control.exit_int_info = 0;
/* Exit nested SVM mode */
- svm->nested_vmcb = 0;
+ svm->nested.vmcb = 0;
return 0;
}
@@ -1690,7 +1694,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
nsvm_printk("VMexit\n");
- if (nested_svm_do(svm, svm->nested_vmcb, 0,
+ if (nested_svm_do(svm, svm->nested.vmcb, 0,
NULL, nested_svm_vmexit_real))
return 1;
@@ -1706,8 +1710,8 @@ static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
int i;
u32 *nested_msrpm = (u32*)arg1;
for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
- svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
- svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
+ svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+ svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
return 0;
}
@@ -1716,11 +1720,11 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
void *arg2, void *opaque)
{
struct vmcb *nested_vmcb = (struct vmcb *)arg1;
- struct vmcb *hsave = svm->hsave;
+ struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
/* nested_vmcb is our indicator if nested SVM is activated */
- svm->nested_vmcb = svm->vmcb->save.rax;
+ svm->nested.vmcb = svm->vmcb->save.rax;
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
@@ -1798,7 +1802,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
- svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+ svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
force_new_asid(&svm->vcpu);
svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
@@ -1900,7 +1904,7 @@ static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
NULL, nested_svm_vmrun))
return 1;
- if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
+ if (nested_svm_do(svm, svm->nested.vmcb_msrpm, 0,
NULL, nested_svm_vmrun_msrpm))
return 1;
@@ -2110,7 +2114,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
*data = svm->vmcb->save.last_excp_to;
break;
case MSR_VM_HSAVE_PA:
- *data = svm->hsave_msr;
+ *data = svm->nested.hsave_msr;
break;
case MSR_VM_CR:
*data = 0;
@@ -2196,7 +2200,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
svm_disable_lbrv(svm);
break;
case MSR_VM_HSAVE_PA:
- svm->hsave_msr = data;
+ svm->nested.hsave_msr = data;
break;
case MSR_VM_CR:
case MSR_VM_IGNNE:
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 07/21] kvm/svm: cache nested intercepts
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (5 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 06/21] kvm/svm: move nested svm state into separate struct Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 08/21] kvm/svm: consolidate nested_svm_exit_handled Joerg Roedel
` (14 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
When the nested intercepts are cached we don't need to call
get_user_pages and/or map the nested vmcb on every nested #vmexit to
check who will handle the intercept.
Further, this patch aligns the emulated SVM behavior more closely
with real hardware.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 30 +++++++++++++++++++++++-------
1 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b4e2587..75fdfe7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -80,6 +80,15 @@ struct nested_state {
/* gpa pointers to the real vectors */
u64 vmcb_msrpm;
+
+ /* cache for intercepts of the guest */
+ u16 intercept_cr_read;
+ u16 intercept_cr_write;
+ u16 intercept_dr_read;
+ u16 intercept_dr_write;
+ u32 intercept_exceptions;
+ u64 intercept;
+
};
struct vcpu_svm {
@@ -1455,7 +1464,6 @@ static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
void *arg2,
void *opaque)
{
- struct vmcb *nested_vmcb = (struct vmcb *)arg1;
bool kvm_overrides = *(bool *)opaque;
u32 exit_code = svm->vmcb->control.exit_code;
@@ -1482,38 +1490,38 @@ static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
switch (exit_code) {
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
- if (nested_vmcb->control.intercept_cr_read & cr_bits)
+ if (svm->nested.intercept_cr_read & cr_bits)
return 1;
break;
}
case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
- if (nested_vmcb->control.intercept_cr_write & cr_bits)
+ if (svm->nested.intercept_cr_write & cr_bits)
return 1;
break;
}
case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
- if (nested_vmcb->control.intercept_dr_read & dr_bits)
+ if (svm->nested.intercept_dr_read & dr_bits)
return 1;
break;
}
case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
- if (nested_vmcb->control.intercept_dr_write & dr_bits)
+ if (svm->nested.intercept_dr_write & dr_bits)
return 1;
break;
}
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
- if (nested_vmcb->control.intercept_exceptions & excp_bits)
+ if (svm->nested.intercept_exceptions & excp_bits)
return 1;
break;
}
default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
nsvm_printk("exit code: 0x%x\n", exit_code);
- if (nested_vmcb->control.intercept & exit_bits)
+ if (svm->nested.intercept & exit_bits)
return 1;
}
}
@@ -1804,6 +1812,14 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+ /* cache intercepts */
+ svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
+ svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write;
+ svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read;
+ svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write;
+ svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
+ svm->nested.intercept = nested_vmcb->control.intercept;
+
force_new_asid(&svm->vcpu);
svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 08/21] kvm/svm: consolidate nested_svm_exit_handled
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (6 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 07/21] kvm/svm: cache nested intercepts Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 09/21] kvm/svm: do nested vmexit in nested_svm_exit_handled Joerg Roedel
` (13 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
When caching guest intercepts there is no need anymore for the
nested_svm_exit_handled_real function. So move its code into
nested_svm_exit_handled.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
arch/x86/kvm/svm.c | 109 +++++++++++++++++++++++----------------------------
1 files changed, 49 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 75fdfe7..b2c9a9e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1459,15 +1459,58 @@ static int nested_svm_do(struct vcpu_svm *svm,
return retval;
}
-static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
- void *arg1,
- void *arg2,
- void *opaque)
+static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
+ void *arg1, void *arg2,
+ void *opaque)
+{
+ struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+ u8 *msrpm = (u8 *)arg2;
+ u32 t0, t1;
+ u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+ u32 param = svm->vmcb->control.exit_info_1 & 1;
+
+ if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+ return 0;
+
+ switch (msr) {
+ case 0 ... 0x1fff:
+ t0 = (msr * 2) % 8;
+ t1 = msr / 8;
+ break;
+ case 0xc0000000 ... 0xc0001fff:
+ t0 = (8192 + msr - 0xc0000000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ case 0xc0010000 ... 0xc0011fff:
+ t0 = (16384 + msr - 0xc0010000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ default:
+ return 1;
+ break;
+ }
+ if (msrpm[t1] & ((1 << param) << t0))
+ return 1;
+
+ return 0;
+}
+
+static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
{
- bool kvm_overrides = *(bool *)opaque;
u32 exit_code = svm->vmcb->control.exit_code;
- if (kvm_overrides) {
+ switch (svm->vmcb->control.exit_code) {
+ case SVM_EXIT_MSR:
+ return nested_svm_do(svm, svm->nested.vmcb,
+ svm->nested.vmcb_msrpm, NULL,
+ nested_svm_exit_handled_msr);
+ default:
+ break;
+ }
+
+ if (kvm_override) {
switch (exit_code) {
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
@@ -1529,60 +1572,6 @@ static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
return 0;
}
-static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
- void *arg1, void *arg2,
- void *opaque)
-{
- struct vmcb *nested_vmcb = (struct vmcb *)arg1;
- u8 *msrpm = (u8 *)arg2;
- u32 t0, t1;
- u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- u32 param = svm->vmcb->control.exit_info_1 & 1;
-
- if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
- return 0;
-
- switch(msr) {
- case 0 ... 0x1fff:
- t0 = (msr * 2) % 8;
- t1 = msr / 8;
- break;
- case 0xc0000000 ... 0xc0001fff:
- t0 = (8192 + msr - 0xc0000000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- case 0xc0010000 ... 0xc0011fff:
- t0 = (16384 + msr - 0xc0010000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- default:
- return 1;
- break;
- }
- if (msrpm[t1] & ((1 << param) << t0))
- return 1;
-
- return 0;
-}
-
-static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
-{
- bool k = kvm_override;
-
- switch (svm->vmcb->control.exit_code) {
- case SVM_EXIT_MSR:
- return nested_svm_do(svm, svm->nested.vmcb,
- svm->nested.vmcb_msrpm, NULL,
- nested_svm_exit_handled_msr);
- default: break;
- }
-
- return nested_svm_do(svm, svm->nested.vmcb, 0, &k,
- nested_svm_exit_handled_real);
-}
-
static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
struct vmcb_control_area *dst = &dst_vmcb->control;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 09/21] kvm/svm: do nested vmexit in nested_svm_exit_handled
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (7 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 08/21] kvm/svm: consolidate nested_svm_exit_handled Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 10/21] kvm/svm: simplify nested_svm_check_exception Joerg Roedel
` (12 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
If this function returns true a nested vmexit is required. Move that
vmexit into the nested_svm_exit_handled function. This also simplifies
the handling of nested #pf intercepts in this function.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
arch/x86/kvm/svm.c | 42 +++++++++++++++++++-----------------------
1 files changed, 19 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b2c9a9e..c473ad9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1369,8 +1369,6 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
if (nested_svm_exit_handled(svm, false)) {
nsvm_printk("VMexit -> EXCP 0x%x\n", nr);
-
- nested_svm_vmexit(svm);
return 1;
}
}
@@ -1391,7 +1389,6 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
if (nested_svm_exit_handled(svm, false)) {
nsvm_printk("VMexit -> INTR\n");
- nested_svm_vmexit(svm);
return 1;
}
}
@@ -1500,15 +1497,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
{
u32 exit_code = svm->vmcb->control.exit_code;
-
- switch (svm->vmcb->control.exit_code) {
- case SVM_EXIT_MSR:
- return nested_svm_do(svm, svm->nested.vmcb,
- svm->nested.vmcb_msrpm, NULL,
- nested_svm_exit_handled_msr);
- default:
- break;
- }
+ bool vmexit = false;
if (kvm_override) {
switch (exit_code) {
@@ -1531,45 +1520,55 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
}
switch (exit_code) {
+ case SVM_EXIT_MSR:
+ if (nested_svm_do(svm, svm->nested.vmcb, svm->nested.vmcb_msrpm,
+ NULL, nested_svm_exit_handled_msr))
+ vmexit = true;
+ break;
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
if (svm->nested.intercept_cr_read & cr_bits)
- return 1;
+ vmexit = true;
break;
}
case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
if (svm->nested.intercept_cr_write & cr_bits)
- return 1;
+ vmexit = true;
break;
}
case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
if (svm->nested.intercept_dr_read & dr_bits)
- return 1;
+ vmexit = true;
break;
}
case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
if (svm->nested.intercept_dr_write & dr_bits)
- return 1;
+ vmexit = true;
break;
}
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
if (svm->nested.intercept_exceptions & excp_bits)
- return 1;
+ vmexit = true;
break;
}
default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
nsvm_printk("exit code: 0x%x\n", exit_code);
if (svm->nested.intercept & exit_bits)
- return 1;
+ vmexit = true;
}
}
- return 0;
+ if (vmexit) {
+ nsvm_printk("#VMEXIT reason=%04x\n", exit_code);
+ nested_svm_vmexit(svm);
+ }
+
+ return vmexit;
}
static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
@@ -2328,11 +2327,8 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
exit_code, svm->vmcb->control.exit_info_1,
svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
- if (nested_svm_exit_handled(svm, true)) {
- nested_svm_vmexit(svm);
- nsvm_printk("-> #VMEXIT\n");
+ if (nested_svm_exit_handled(svm, true))
return 1;
- }
}
svm_complete_interrupts(svm);
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 10/21] kvm/svm: simplify nested_svm_check_exception
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (8 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 09/21] kvm/svm: do nested vmexit in nested_svm_exit_handled Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 11/21] kvm/svm: get rid of nested_svm_vmexit_real Joerg Roedel
` (11 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
Makes the code of this function more readable by removing one
indentation level for the core logic.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 19 ++++++++-----------
1 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c473ad9..a85b0a2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1362,18 +1362,15 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code)
{
- if (is_nested(svm)) {
- svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
- svm->vmcb->control.exit_code_hi = 0;
- svm->vmcb->control.exit_info_1 = error_code;
- svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
- if (nested_svm_exit_handled(svm, false)) {
- nsvm_printk("VMexit -> EXCP 0x%x\n", nr);
- return 1;
- }
- }
+ if (!is_nested(svm))
+ return 0;
- return 0;
+ svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
+ svm->vmcb->control.exit_code_hi = 0;
+ svm->vmcb->control.exit_info_1 = error_code;
+ svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
+
+ return nested_svm_exit_handled(svm, false);
}
static inline int nested_svm_intr(struct vcpu_svm *svm)
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 11/21] kvm/svm: get rid of nested_svm_vmexit_real
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (9 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 10/21] kvm/svm: simplify nested_svm_check_exception Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 12/21] kvm/svm: clean up nested_svm_exit_handled_msr Joerg Roedel
` (10 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This patch is the starting point of removing nested_svm_do from the
nested svm code. The nested_svm_do function basically maps two guest
physical pages to host virtual addresses and calls a passed function
on it. This function pointer code flow is hard to read and not the
best technical solution here.
As a side effect this patch introduces the nested_svm_[un]map helper
functions.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 52 ++++++++++++++++++++++++++++++++++++++++------------
1 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a85b0a2..1753a64 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1393,6 +1393,39 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
return 0;
}
+static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+{
+ struct page *page;
+
+ down_read(&current->mm->mmap_sem);
+ page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
+ up_read(&current->mm->mmap_sem);
+
+ if (is_error_page(page))
+ goto error;
+
+ return kmap_atomic(page, idx);
+
+error:
+ kvm_release_page_clean(page);
+ kvm_inject_gp(&svm->vcpu, 0);
+
+ return NULL;
+}
+
+static void nested_svm_unmap(void *addr, enum km_type idx)
+{
+ struct page *page;
+
+ if (!addr)
+ return;
+
+ page = kmap_atomic_to_page(addr);
+
+ kunmap_atomic(addr, idx);
+ kvm_release_page_dirty(page);
+}
+
static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
{
struct page *page;
@@ -1600,13 +1633,16 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
dst->lbr_ctl = from->lbr_ctl;
}
-static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
- void *arg2, void *opaque)
+static int nested_svm_vmexit(struct vcpu_svm *svm)
{
- struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+ struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+ if (!nested_vmcb)
+ return 1;
+
/* Give the current vmcb to the guest */
disable_gif(svm);
@@ -1681,15 +1717,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
/* Exit nested SVM mode */
svm->nested.vmcb = 0;
- return 0;
-}
-
-static int nested_svm_vmexit(struct vcpu_svm *svm)
-{
- nsvm_printk("VMexit\n");
- if (nested_svm_do(svm, svm->nested.vmcb, 0,
- NULL, nested_svm_vmexit_real))
- return 1;
+ nested_svm_unmap(nested_vmcb, KM_USER0);
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 12/21] kvm/svm: clean up nested_svm_exit_handled_msr
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (10 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 11/21] kvm/svm: get rid of nested_svm_vmexit_real Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 13/21] kvm/svm: clean up nested vmload/vmsave paths Joerg Roedel
` (9 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This patch changes nested svm to call nested_svm_exit_handled_msr
directly and not through nested_svm_do.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 37 ++++++++++++++++++++++---------------
1 files changed, 22 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1753a64..448d493 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1486,15 +1486,20 @@ static int nested_svm_do(struct vcpu_svm *svm,
return retval;
}
-static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
- void *arg1, void *arg2,
- void *opaque)
+static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
- struct vmcb *nested_vmcb = (struct vmcb *)arg1;
- u8 *msrpm = (u8 *)arg2;
- u32 t0, t1;
- u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u32 param = svm->vmcb->control.exit_info_1 & 1;
+ u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+ struct vmcb *nested_vmcb;
+ bool ret = false;
+ u32 t0, t1;
+ u8 *msrpm;
+
+ nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+ msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER1);
+
+ if (!nested_vmcb || !msrpm)
+ goto out;
if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
return 0;
@@ -1515,13 +1520,17 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
t0 %= 8;
break;
default:
- return 1;
- break;
+ ret = true;
+ goto out;
}
- if (msrpm[t1] & ((1 << param) << t0))
- return 1;
- return 0;
+ ret = msrpm[t1] & ((1 << param) << t0);
+
+out:
+ nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(msrpm, KM_USER1);
+
+ return ret;
}
static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
@@ -1551,9 +1560,7 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
switch (exit_code) {
case SVM_EXIT_MSR:
- if (nested_svm_do(svm, svm->nested.vmcb, svm->nested.vmcb_msrpm,
- NULL, nested_svm_exit_handled_msr))
- vmexit = true;
+ vmexit = nested_svm_exit_handled_msr(svm);
break;
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 13/21] kvm/svm: clean up nested vmload/vmsave paths
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (11 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 12/21] kvm/svm: clean up nested_svm_exit_handled_msr Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 14/21] kvm/svm: clean up nested vmrun path Joerg Roedel
` (8 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This patch removes the usage of nested_svm_do from the vmload and
vmsave emulation code paths.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 36 +++++++++++++++++-------------------
1 files changed, 17 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 448d493..c2ca953 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -129,8 +129,6 @@ static void svm_complete_interrupts(struct vcpu_svm *svm);
static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
static int nested_svm_vmexit(struct vcpu_svm *svm);
-static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
- void *arg2, void *opaque);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
@@ -1871,7 +1869,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
return 0;
}
-static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
to_vmcb->save.fs = from_vmcb->save.fs;
to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1885,44 +1883,44 @@ static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
-
- return 1;
-}
-
-static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
- void *arg2, void *opaque)
-{
- return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
-}
-
-static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
- void *arg2, void *opaque)
-{
- return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
}
static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
+ struct vmcb *nested_vmcb;
+
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ if (!nested_vmcb)
+ return 1;
+
+ nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
+ nested_svm_unmap(nested_vmcb, KM_USER0);
return 1;
}
static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
+ struct vmcb *nested_vmcb;
+
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ if (!nested_vmcb)
+ return 1;
+
+ nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
+ nested_svm_unmap(nested_vmcb, KM_USER0);
return 1;
}
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 14/21] kvm/svm: clean up nested vmrun path
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (12 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 13/21] kvm/svm: clean up nested vmload/vmsave paths Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 15/21] kvm/svm: remove nested_svm_do and helper functions Joerg Roedel
` (7 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This patch removes the usage of nested_svm_do from the vmrun emulation
path.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 34 ++++++++++++++++++++++------------
1 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c2ca953..c1e3f46 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1730,25 +1730,35 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
return 0;
}
-static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
- void *arg2, void *opaque)
+static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
+ u32 *nested_msrpm;
int i;
- u32 *nested_msrpm = (u32*)arg1;
+
+ nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+ if (!nested_msrpm)
+ return false;
+
for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+
svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
- return 0;
+ nested_svm_unmap(nested_msrpm, KM_USER0);
+
+ return true;
}
-static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
- void *arg2, void *opaque)
+static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
- struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+ struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ if (!nested_vmcb)
+ return false;
+
/* nested_vmcb is our indicator if nested SVM is activated */
svm->nested.vmcb = svm->vmcb->save.rax;
@@ -1864,9 +1874,11 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
+ nested_svm_unmap(nested_vmcb, KM_USER0);
+
enable_gif(svm);
- return 0;
+ return true;
}
static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
@@ -1934,12 +1946,10 @@ static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- if (nested_svm_do(svm, svm->vmcb->save.rax, 0,
- NULL, nested_svm_vmrun))
+ if (!nested_svm_vmrun(svm))
return 1;
- if (nested_svm_do(svm, svm->nested.vmcb_msrpm, 0,
- NULL, nested_svm_vmrun_msrpm))
+ if (!nested_svm_vmrun_msrpm(svm))
return 1;
return 1;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 15/21] kvm/svm: remove nested_svm_do and helper functions
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (13 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 14/21] kvm/svm: clean up nested vmrun path Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 16/21] kvm/svm: handle errors in vmrun emulation path appropriately Joerg Roedel
` (6 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This function is no longer required, so remove it.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 60 ----------------------------------------------------
1 files changed, 0 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c1e3f46..63d2f63 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1424,66 +1424,6 @@ static void nested_svm_unmap(void *addr, enum km_type idx)
kvm_release_page_dirty(page);
}
-static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
-{
- struct page *page;
-
- down_read(&current->mm->mmap_sem);
- page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
- up_read(&current->mm->mmap_sem);
-
- if (is_error_page(page)) {
- printk(KERN_INFO "%s: could not find page at 0x%llx\n",
- __func__, gpa);
- kvm_release_page_clean(page);
- kvm_inject_gp(&svm->vcpu, 0);
- return NULL;
- }
- return page;
-}
-
-static int nested_svm_do(struct vcpu_svm *svm,
- u64 arg1_gpa, u64 arg2_gpa, void *opaque,
- int (*handler)(struct vcpu_svm *svm,
- void *arg1,
- void *arg2,
- void *opaque))
-{
- struct page *arg1_page;
- struct page *arg2_page = NULL;
- void *arg1;
- void *arg2 = NULL;
- int retval;
-
- arg1_page = nested_svm_get_page(svm, arg1_gpa);
- if(arg1_page == NULL)
- return 1;
-
- if (arg2_gpa) {
- arg2_page = nested_svm_get_page(svm, arg2_gpa);
- if(arg2_page == NULL) {
- kvm_release_page_clean(arg1_page);
- return 1;
- }
- }
-
- arg1 = kmap_atomic(arg1_page, KM_USER0);
- if (arg2_gpa)
- arg2 = kmap_atomic(arg2_page, KM_USER1);
-
- retval = handler(svm, arg1, arg2, opaque);
-
- kunmap_atomic(arg1, KM_USER0);
- if (arg2_gpa)
- kunmap_atomic(arg2, KM_USER1);
-
- kvm_release_page_dirty(arg1_page);
- if (arg2_gpa)
- kvm_release_page_dirty(arg2_page);
-
- return retval;
-}
-
static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
u32 param = svm->vmcb->control.exit_info_1 & 1;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 16/21] kvm/svm: handle errors in vmrun emulation path appropriately
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (14 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 15/21] kvm/svm: remove nested_svm_do and helper functions Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 17/21] kvm/svm: move special nested exit handling to separate function Joerg Roedel
` (5 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
If nested svm fails to load the msrpm the vmrun succeeds with the old
msrpm which is not correct. This patch changes the logic to roll back
to host mode in case the msrpm cannot be loaded.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 14 +++++++++++++-
1 files changed, 13 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 63d2f63..cad7582 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1880,6 +1880,7 @@ static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
nsvm_printk("VMrun\n");
+
if (nested_svm_check_permissions(svm))
return 1;
@@ -1890,7 +1891,18 @@ static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
return 1;
if (!nested_svm_vmrun_msrpm(svm))
- return 1;
+ goto failed;
+
+ return 1;
+
+failed:
+
+ svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+ svm->vmcb->control.exit_code_hi = 0;
+ svm->vmcb->control.exit_info_1 = 0;
+ svm->vmcb->control.exit_info_2 = 0;
+
+ nested_svm_vmexit(svm);
return 1;
}
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 17/21] kvm/svm: move special nested exit handling to separate function
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (15 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 16/21] kvm/svm: handle errors in vmrun emulation path appropriately Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 18/21] kvm/svm: remove unnecessary is_nested check from svm_cpu_run Joerg Roedel
` (4 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This patch moves the handling for special nested vmexits like #pf to a
separate function. This makes the kvm_override parameter obsolete and
makes the code more readable.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 80 ++++++++++++++++++++++++++++++++-------------------
1 files changed, 50 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cad7582..1839c19 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,6 +47,10 @@ MODULE_LICENSE("GPL");
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
+#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
+#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
+#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
+
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
/* Turn on to get debugging output*/
@@ -127,7 +131,7 @@ module_param(nested, int, S_IRUGO);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);
-static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
+static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
@@ -1368,7 +1372,7 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_info_1 = error_code;
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
- return nested_svm_exit_handled(svm, false);
+ return nested_svm_exit_handled(svm);
}
static inline int nested_svm_intr(struct vcpu_svm *svm)
@@ -1382,7 +1386,7 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
svm->vmcb->control.exit_code = SVM_EXIT_INTR;
- if (nested_svm_exit_handled(svm, false)) {
+ if (nested_svm_exit_handled(svm)) {
nsvm_printk("VMexit -> INTR\n");
return 1;
}
@@ -1471,31 +1475,39 @@ out:
return ret;
}
-static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
+static int nested_svm_exit_special(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
- bool vmexit = false;
- if (kvm_override) {
- switch (exit_code) {
- case SVM_EXIT_INTR:
- case SVM_EXIT_NMI:
- return 0;
+ switch (exit_code) {
+ case SVM_EXIT_INTR:
+ case SVM_EXIT_NMI:
+ return NESTED_EXIT_HOST;
/* For now we are always handling NPFs when using them */
- case SVM_EXIT_NPF:
- if (npt_enabled)
- return 0;
- break;
- /* When we're shadowing, trap PFs */
- case SVM_EXIT_EXCP_BASE + PF_VECTOR:
- if (!npt_enabled)
- return 0;
- break;
- default:
- break;
- }
+ case SVM_EXIT_NPF:
+ if (npt_enabled)
+ return NESTED_EXIT_HOST;
+ break;
+ /* When we're shadowing, trap PFs */
+ case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+ if (!npt_enabled)
+ return NESTED_EXIT_HOST;
+ break;
+ default:
+ break;
}
+ return NESTED_EXIT_CONTINUE;
+}
+
+/*
+ * If this function returns true, this #vmexit was already handled
+ */
+static int nested_svm_exit_handled(struct vcpu_svm *svm)
+{
+ u32 exit_code = svm->vmcb->control.exit_code;
+ int vmexit = NESTED_EXIT_HOST;
+
switch (exit_code) {
case SVM_EXIT_MSR:
vmexit = nested_svm_exit_handled_msr(svm);
@@ -1503,42 +1515,42 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
if (svm->nested.intercept_cr_read & cr_bits)
- vmexit = true;
+ vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
if (svm->nested.intercept_cr_write & cr_bits)
- vmexit = true;
+ vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
if (svm->nested.intercept_dr_read & dr_bits)
- vmexit = true;
+ vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
if (svm->nested.intercept_dr_write & dr_bits)
- vmexit = true;
+ vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
if (svm->nested.intercept_exceptions & excp_bits)
- vmexit = true;
+ vmexit = NESTED_EXIT_DONE;
break;
}
default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
nsvm_printk("exit code: 0x%x\n", exit_code);
if (svm->nested.intercept & exit_bits)
- vmexit = true;
+ vmexit = NESTED_EXIT_DONE;
}
}
- if (vmexit) {
+ if (vmexit == NESTED_EXIT_DONE) {
nsvm_printk("#VMEXIT reason=%04x\n", exit_code);
nested_svm_vmexit(svm);
}
@@ -2316,10 +2328,18 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
trace_kvm_exit(exit_code, svm->vmcb->save.rip);
if (is_nested(svm)) {
+ int vmexit;
+
nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
exit_code, svm->vmcb->control.exit_info_1,
svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
- if (nested_svm_exit_handled(svm, true))
+
+ vmexit = nested_svm_exit_special(svm);
+
+ if (vmexit == NESTED_EXIT_CONTINUE)
+ vmexit = nested_svm_exit_handled(svm);
+
+ if (vmexit == NESTED_EXIT_DONE)
return 1;
}
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 18/21] kvm/svm: remove unnecessary is_nested check from svm_cpu_run
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (16 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 17/21] kvm/svm: move special nested exit handling to separate function Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 19/21] kvm/svm: move nested_svm_intr main logic out of if-clause Joerg Roedel
` (3 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This check is not necessary. We always have to sync vcpu->arch.cr2
back to the VMCB. This patch removes the is_nested check.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1839c19..9a2354d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2606,8 +2606,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
fs_selector = kvm_read_fs();
gs_selector = kvm_read_gs();
ldt_selector = kvm_read_ldt();
- if (!is_nested(svm))
- svm->vmcb->save.cr2 = vcpu->arch.cr2;
+ svm->vmcb->save.cr2 = vcpu->arch.cr2;
/* required for live migration with NPT */
if (npt_enabled)
svm->vmcb->save.cr3 = vcpu->arch.cr3;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 19/21] kvm/svm: move nested_svm_intr main logic out of if-clause
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (17 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 18/21] kvm/svm: remove unnecessary is_nested check from svm_cpu_run Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 20/21] kvm/svm: check for nested VINTR flag in svm_interrupt_allowed Joerg Roedel
` (2 subsequent siblings)
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
This patch removes one indentation level from nested_svm_intr and
makes the logic more readable.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 21 +++++++++++----------
1 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9a2354d..089310d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1377,19 +1377,20 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
static inline int nested_svm_intr(struct vcpu_svm *svm)
{
- if (is_nested(svm)) {
- if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
- return 0;
+ if (!is_nested(svm))
+ return 0;
- if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
- return 0;
+ if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
+ return 0;
- svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+ if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
+ return 0;
- if (nested_svm_exit_handled(svm)) {
- nsvm_printk("VMexit -> INTR\n");
- return 1;
- }
+ svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+
+ if (nested_svm_exit_handled(svm)) {
+ nsvm_printk("VMexit -> INTR\n");
+ return 1;
}
return 0;
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 20/21] kvm/svm: check for nested VINTR flag in svm_interrupt_allowed
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (18 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 19/21] kvm/svm: move nested_svm_intr main logic out of if-clause Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-07 9:49 ` [PATCH 21/21] kvm/svm: enable nested svm by default Joerg Roedel
2009-08-09 9:41 ` [PATCH 0/21] Nested SVM cleanups v2 Avi Kivity
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
Not checking for this flag breaks any nested hypervisor that does not
set VINTR. So fix it with this patch.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 089310d..8b32cc8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2467,7 +2467,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
return (vmcb->save.rflags & X86_EFLAGS_IF) &&
!(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
gif_set(svm) &&
- !is_nested(svm);
+ !(is_nested(svm) && (svm->vcpu.arch.hflags & HF_VINTR_MASK));
}
static void enable_irq_window(struct kvm_vcpu *vcpu)
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread

* [PATCH 21/21] kvm/svm: enable nested svm by default
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (19 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 20/21] kvm/svm: check for nested VINTR flag in svm_interrupt_allowed Joerg Roedel
@ 2009-08-07 9:49 ` Joerg Roedel
2009-08-09 9:41 ` [PATCH 0/21] Nested SVM cleanups v2 Avi Kivity
21 siblings, 0 replies; 23+ messages in thread
From: Joerg Roedel @ 2009-08-07 9:49 UTC (permalink / raw)
To: Avi Kivity; +Cc: Alexander Graf, kvm, linux-kernel, Joerg Roedel
Nested SVM is (in my experience) stable enough to be enabled by
default. So omit the requirement to pass a module parameter.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
arch/x86/kvm/svm.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8b32cc8..8a28956 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -125,7 +125,7 @@ static int npt = 1;
module_param(npt, int, S_IRUGO);
-static int nested = 0;
+static int nested = 1;
module_param(nested, int, S_IRUGO);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
--
1.6.3.3
^ permalink raw reply related [flat|nested] 23+ messages in thread* Re: [PATCH 0/21] Nested SVM cleanups v2
2009-08-07 9:49 [PATCH 0/21] Nested SVM cleanups v2 Joerg Roedel
` (20 preceding siblings ...)
2009-08-07 9:49 ` [PATCH 21/21] kvm/svm: enable nested svm by default Joerg Roedel
@ 2009-08-09 9:41 ` Avi Kivity
21 siblings, 0 replies; 23+ messages in thread
From: Avi Kivity @ 2009-08-09 9:41 UTC (permalink / raw)
To: Joerg Roedel; +Cc: Alexander Graf, kvm, linux-kernel
On 08/07/2009 12:49 PM, Joerg Roedel wrote:
> Hi,
>
> this is the second and extended version of my patchset to clean up the code for
> SVM virtualization in KVM. The patchset was tested with KVM on KVM and showed
> no regressions. It was tested with Nested Paging and with Shadow Paging on the
> first-level guest.
> As a major change this patchset enables the nested SVM code by
> default. It is still required to start qemu with -enable-nesting,
> though.
>
Applied, thanks.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 23+ messages in thread