* RFC: cache_regs in kvm_emulate_pio
@ 2008-06-19 23:31 Marcelo Tosatti
2008-06-20 20:30 ` Avi Kivity
0 siblings, 1 reply; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-19 23:31 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
Hi,
From my understanding the ->cache_regs call on kvm_emulate_pio() is
necessary only on AMD, where vcpu->arch.regs[RAX] is not copied during
exit in svm_vcpu_load().
On both architectures, the remaining general purpose registers are saved
on exit.
The following patch saves 100 cycles out of both light and heavy exits
on Intel (if correct, kvm_emulate_hypercall and complete_pio could also
benefit, thus saving 200 cycles for in-kernel devices).
BTW, the decache_regs(vcpu) call at the end of complete_pio() could also
be a noop on Intel, from what I can tell?
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 238e8f3..6f247cc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -709,6 +709,13 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
rdtscll(vcpu->arch.host_tsc);
}
+static void svm_cache_rax(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+}
+
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1949,6 +1956,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_gdt = svm_set_gdt,
.get_dr = svm_get_dr,
.set_dr = svm_set_dr,
+ .cache_rax = svm_cache_rax,
.cache_regs = svm_cache_regs,
.decache_regs = svm_decache_regs,
.get_rflags = svm_get_rflags,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e4278d..0d9a148 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -932,6 +932,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
}
/*
+ * vcpu->arch.regs[RAX] already loaded by vmx_vcpu_run().
+ */
+static void vcpu_load_rax(struct kvm_vcpu *vcpu)
+{
+}
+
+/*
* Sync the rsp and rip registers into the vcpu structure. This allows
* registers to be accessed by indexing vcpu->arch.regs.
*/
@@ -3213,6 +3220,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
+ .cache_rax = vcpu_load_rax,
.cache_regs = vcpu_load_rsp_rip,
.decache_regs = vcpu_put_rsp_rip,
.get_rflags = vmx_get_rflags,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 26b051b..6111946 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2302,7 +2302,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
handler);
- kvm_x86_ops->cache_regs(vcpu);
+ kvm_x86_ops->cache_rax(vcpu);
memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
kvm_x86_ops->skip_emulated_instruction(vcpu);
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 851184d..95a0736 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -410,6 +410,7 @@ struct kvm_x86_ops {
unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
int *exception);
+ void (*cache_rax)(struct kvm_vcpu *vcpu);
void (*cache_regs)(struct kvm_vcpu *vcpu);
void (*decache_regs)(struct kvm_vcpu *vcpu);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-19 23:31 Marcelo Tosatti
@ 2008-06-20 20:30 ` Avi Kivity
2008-06-20 21:24 ` Marcelo Tosatti
0 siblings, 1 reply; 12+ messages in thread
From: Avi Kivity @ 2008-06-20 20:30 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm-devel
Marcelo Tosatti wrote:
> Hi,
>
> From my understanding the ->cache_regs call on kvm_emulate_pio() is
> necessary only on AMD, where vcpu->arch.regs[RAX] is not copied during
> exit in svm_vcpu_load().
>
> On both architectures, the remaining general purpose registers are saved
> on exit.
>
> The following patch saves 100 cycles out of both light and heavy exits
> on Intel (if correct, kvm_emulate_hypercall and complete_pio could also
> benefit, thus saving 200 cycles for in-kernel devices).
>
ISTR vmwrite as 50 cycles and vmread as much lower.
> BTW, the decache_regs(vcpu) call at the end of complete_pio() could also
> be a noop on Intel, from what I can tell?
>
>
I think so. decache_regs() is actually more important.
> diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
> index 851184d..95a0736 100644
> --- a/include/asm-x86/kvm_host.h
> +++ b/include/asm-x86/kvm_host.h
> @@ -410,6 +410,7 @@ struct kvm_x86_ops {
> unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
> void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
> int *exception);
> + void (*cache_rax)(struct kvm_vcpu *vcpu);
> void (*cache_regs)(struct kvm_vcpu *vcpu);
> void (*decache_regs)(struct kvm_vcpu *vcpu);
> unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
>
ugh, another callback. how about instead
/* in vcpu structure */
u16 regs_available;
u16 regs_dirty;
/* read from cache if possible */
if (!test_bit(VCPU_REGS_RAX, &regs_available))
->cache_regs();
printk("%d\n", regs[VCPU_REGS_RAX]);
/* write to cache, ->vcpu_run() will flush */
regs[VCPU_REGS_RAX] = 17;
__set_bit(VCPU_REGS_RAX, &regs_dirty);
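Spelled out a bit more, the wrappers could be something like this -- just a
sketch, the vcpu_reg_read()/vcpu_reg_write() names are made up:
static inline unsigned long vcpu_reg_read(struct kvm_vcpu *vcpu, int reg)
{
	/* pull the register in from the VMCS/VMCB only on first use */
	if (!(vcpu->arch.regs_available & (1 << reg))) {
		kvm_x86_ops->cache_regs(vcpu); /* or a finer-grained variant */
		vcpu->arch.regs_available |= 1 << reg;
	}
	return vcpu->arch.regs[reg];
}
static inline void vcpu_reg_write(struct kvm_vcpu *vcpu, int reg,
				  unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	vcpu->arch.regs_dirty |= 1 << reg;	/* flushed before ->run() */
}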
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-20 20:30 ` Avi Kivity
@ 2008-06-20 21:24 ` Marcelo Tosatti
2008-06-21 7:04 ` Avi Kivity
0 siblings, 1 reply; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-20 21:24 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Fri, Jun 20, 2008 at 11:30:05PM +0300, Avi Kivity wrote:
> Marcelo Tosatti wrote:
>> Hi,
>>
>> From my understanding the ->cache_regs call on kvm_emulate_pio() is
>> necessary only on AMD, where vcpu->arch.regs[RAX] is not copied during
>> exit in svm_vcpu_load().
>>
>> On both architectures, the remaining general purpose registers are saved
>> on exit.
>>
>> The following patch saves 100 cycles out of both light and heavy exits
>> on Intel (if correct, kvm_emulate_hypercall and complete_pio could also
>> benefit, thus saving 200 cycles for in-kernel devices).
>>
>
> ISTR vmwrite as 50 cycles and vmread as much lower.
On my 1.60GHz testbox ->cache_regs takes 114 cycles, measured with
rdtscll() before and after (rdtscll() takes 90 cycles by itself, due to
the barriers I guess, so the exact number was 204 cycles). Calling the
empty ->cache_rax takes 6 cycles.
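The measurement was nothing fancier than a pair of rdtscll() calls around
the callback, roughly like this (a rough sketch, not the exact
instrumentation used):
	unsigned long long t0, t1;

	rdtscll(t0);
	kvm_x86_ops->cache_regs(vcpu);
	rdtscll(t1);
	/* numbers read back later from the dmesg buffer */
	printk(KERN_DEBUG "cache_regs: %llu cycles\n", t1 - t0);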
>> BTW, the decache_regs(vcpu) call at the end of complete_pio() could also
>> be a noop on Intel, from what I can tell?
>>
>>
>
> I think so. decache_regs() is actually more important.
>
>> int *exception);
>> + void (*cache_rax)(struct kvm_vcpu *vcpu);
>> void (*cache_regs)(struct kvm_vcpu *vcpu);
>
> ugh, another callback. how about instead
>
> /* in vcpu structure */
> u16 regs_available;
> u16 regs_dirty;
>
> /* read from cache if possible */
> if (!test_bit(VCPU_REGS_RAX, &regs_available))
> ->cache_regs();
> printk("%d\n", regs[VCPU_REGS_RAX]);
>
> /* write to cache, ->vcpu_run() will flush */
> regs[VCPU_REGS_RAX] = 17;
> __set_bit(VCPU_REGS_RAX, &regs_dirty);
I think that hiding whether registers are cached or not behind wrappers
makes a lot of sense, but having the ->cache_regs interface split can
also result in gains. An index argument to ->cache_regs() would do the
trick.
For example, there's no need to read GUEST_RSP for
skip_emulated_instruction, that's another 50+ cycles.
Unless there's something obscure that means you need to read RSP/RIP
before accessing the now in-memory guest registers saved with "mov"
in vmx_vcpu_run(). The comment on vcpu_load_rsp_rip seems a little
ambiguous to me:
/*
* Sync the rsp and rip registers into the vcpu structure. This allows
* registers to be accessed by indexing vcpu->arch.regs.
*/
But I think it just refers to the interface in general, so that nobody
would try to access RSP or RIP (and RAX in AMD's case) before calling
->cache_regs().
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-20 21:24 ` Marcelo Tosatti
@ 2008-06-21 7:04 ` Avi Kivity
0 siblings, 0 replies; 12+ messages in thread
From: Avi Kivity @ 2008-06-21 7:04 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm-devel
Marcelo Tosatti wrote:
>>
>> ugh, another callback. how about instead
>>
>> /* in vcpu structure */
>> u16 regs_available;
>> u16 regs_dirty;
>>
>> /* read from cache if possible */
>> if (!test_bit(VCPU_REGS_RAX, &regs_available))
>> ->cache_regs();
>> printk("%d\n", regs[VCPU_REGS_RAX]);
>>
>> /* write to cache, ->vcpu_run() will flush */
>> regs[VCPU_REGS_RAX] = 17;
>> __set_bit(VCPU_REGS_RAX, &regs_dirty);
>>
>
> I think that hiding whether registers are cached or not behind wrappers
> makes a lot of sense, but having the ->cache_regs interface split can
> also result in gains. An index argument to ->cache_regs() would do the
> trick.
>
>
Yes and yes.
> For example, there's no need to read GUEST_RSP for
> skip_emulated_instruction, that's another 50+ cycles.
>
> Unless there's something obscure that means you need to read RSP/RIP
> before accessing the now in-memory guest registers saved with "mov"
> in vmx_vcpu_run(). The comment on vcpu_load_rsp_rip seems a little
> ambiguous to me:
>
> /*
> * Sync the rsp and rip registers into the vcpu structure. This allows
> * registers to be accessed by indexing vcpu->arch.regs.
> */
>
> But I think it just refers to the interface in general, so that nobody
> would try to access RSP or RIP (and RAX in AMD's case) before calling
> ->cache_regs().
>
It refers to the fact that sometimes you don't know which registers you
refer to, e.g. in the emulator.
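E.g. there the register operand only falls out of instruction decode,
roughly like this (sketch only):
	/* which GPR gets touched is only known at run time, so the
	 * emulator wants all of vcpu->arch.regs valid up front */
	reg = (c->modrm >> 3) & 7;
	val = c->regs[reg];	/* could be any GPR, including RSP */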
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.
* Re: RFC: cache_regs in kvm_emulate_pio
@ 2008-06-21 19:46 Marcelo Tosatti
2008-06-22 5:16 ` Avi Kivity
0 siblings, 1 reply; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-21 19:46 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Sat, Jun 21, 2008 at 10:04:18AM +0300, Avi Kivity wrote:
>> /*
>> * Sync the rsp and rip registers into the vcpu structure. This allows
>> * registers to be accessed by indexing vcpu->arch.regs.
>> */
>>
>> But I think it just refers to the interface in general, so that nobody
>> would try to access RSP or RIP (and RAX in AMD's case) before calling
>> ->cache_regs().
>>
>
> It refers to the fact that sometimes you don't know which registers you
> refer to, e.g. in the emulator.
How's this?
Test performed with an idle UP guest booted with "nohz=off
clocksource=acpi_pm", so most of the exits are acpi timer reads. This
average is from available entries in the dmesg buffer, about 1500.
"regs_available" and "regs_dirty" could be 8 bits, and placed in a hole
instead of in the middle of longs.
avg cycles to exit to qemu:
before: 3376
after: 3181
195 cycles (~= 6.1% improvement)
avg cycles to exit to qemu, handle exit and return to __vcpu_run before
irq_disable:
before: 7482 cycles.
after: 7227 cycles.
255 cycles (~= 3.5% improvement)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 73f43de..ecf26e3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -32,6 +32,7 @@
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
+#include "kvm_cache_regs.h"
#include "irq.h"
#define PRId64 "d"
@@ -558,7 +559,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
struct kvm_run *run = vcpu->run;
set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
- kvm_x86_ops->cache_regs(vcpu);
+ cache_regs(vcpu, REGS_RIP);
run->tpr_access.rip = vcpu->arch.rip;
run->tpr_access.is_write = write;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 238e8f3..8554b37 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -709,21 +709,38 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
rdtscll(vcpu->arch.host_tsc);
}
-static void svm_cache_regs(struct kvm_vcpu *vcpu)
+static void svm_cache_regs(struct kvm_vcpu *vcpu, enum kvm_reg_set regs)
{
struct vcpu_svm *svm = to_svm(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
- vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
- vcpu->arch.rip = svm->vmcb->save.rip;
+ switch (regs) {
+ case REGS_GPR:
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+ break;
+ case REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+ break;
+ case REGS_RIP:
+ vcpu->arch.rip = svm->vmcb->save.rip;
+ break;
+ }
}
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
+static void svm_decache_regs(struct kvm_vcpu *vcpu, enum kvm_reg_set regs)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.rip;
+
+ switch (regs) {
+ case REGS_GPR:
+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+ break;
+ case REGS_RSP:
+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+ break;
+ case REGS_RIP:
+ svm->vmcb->save.rip = vcpu->arch.rip;
+ break;
+ }
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e4278d..b7a988c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"
#include <asm/io.h>
#include <asm/desc.h>
@@ -707,9 +708,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
unsigned long rip;
u32 interruptibility;
- rip = vmcs_readl(GUEST_RIP);
+ cache_regs(vcpu, REGS_RIP);
+ rip = vcpu->arch.rip;
rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- vmcs_writel(GUEST_RIP, rip);
+ vcpu->arch.rip = rip;
+ decache_regs(vcpu, REGS_RIP);
/*
* We emulated an instruction, so temporary interrupt blocking
@@ -935,20 +938,48 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
* Sync the rsp and rip registers into the vcpu structure. This allows
* registers to be accessed by indexing vcpu->arch.regs.
*/
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_cache_regs(struct kvm_vcpu *vcpu, enum kvm_reg_set regs)
{
- vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
- vcpu->arch.rip = vmcs_readl(GUEST_RIP);
+ switch (regs) {
+ case REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+ break;
+ case REGS_RIP:
+ vcpu->arch.rip = vmcs_readl(GUEST_RIP);
+ break;
+ case REGS_GPR:
+ break;
+ }
}
/*
* Syncs rsp and rip back into the vmcs. Should be called after possible
* modification.
*/
+static void vmx_decache_regs(struct kvm_vcpu *vcpu, enum kvm_reg_set regs)
+{
+ switch (regs) {
+ case REGS_RSP:
+ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+ break;
+ case REGS_RIP:
+ vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+ break;
+ case REGS_GPR:
+ break;
+ }
+}
+
+static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
+{
+ cache_regs(vcpu, REGS_RSP);
+ cache_regs(vcpu, REGS_RIP);
+}
+
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
- vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
- vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+ decache_regs(vcpu, REGS_RSP);
+ decache_regs(vcpu, REGS_RIP);
}
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -3213,8 +3244,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
- .cache_regs = vcpu_load_rsp_rip,
- .decache_regs = vcpu_put_rsp_rip,
+ .cache_regs = vmx_cache_regs,
+ .decache_regs = vmx_decache_regs,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 26b051b..0a8d83b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -19,6 +19,7 @@
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
+#include "kvm_cache_regs.h"
#include <linux/clocksource.h>
#include <linux/kvm.h>
@@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
struct kvm_x86_ops *kvm_x86_ops;
+EXPORT_SYMBOL(kvm_x86_ops);
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -1778,6 +1780,16 @@ static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
return dev;
}
+static void flush_regs(struct kvm_vcpu *vcpu)
+{
+ if (__test_and_clear_bit(REGS_RSP, &vcpu->arch.regs_dirty))
+ kvm_x86_ops->decache_regs(vcpu, REGS_RSP);
+ if (__test_and_clear_bit(REGS_RIP, &vcpu->arch.regs_dirty))
+ kvm_x86_ops->decache_regs(vcpu, REGS_RIP);
+ if (__test_and_clear_bit(REGS_GPR, &vcpu->arch.regs_dirty))
+ kvm_x86_ops->decache_regs(vcpu, REGS_GPR);
+}
+
int emulator_read_std(unsigned long addr,
void *val,
unsigned int bytes,
@@ -2060,7 +2072,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
struct decode_cache *c;
vcpu->arch.mmio_fault_cr2 = cr2;
- kvm_x86_ops->cache_regs(vcpu);
+ cache_all_regs(vcpu);
vcpu->mmio_is_write = 0;
vcpu->arch.pio.string = 0;
@@ -2141,7 +2153,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
- kvm_x86_ops->decache_regs(vcpu);
+ decache_all_regs(vcpu);
kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
if (vcpu->mmio_is_write) {
@@ -2195,7 +2207,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
long delta;
int r;
- kvm_x86_ops->cache_regs(vcpu);
+ cache_regs(vcpu, REGS_GPR);
if (!io->string) {
if (io->in)
@@ -2205,7 +2217,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
if (io->in) {
r = pio_copy_data(vcpu);
if (r) {
- kvm_x86_ops->cache_regs(vcpu);
+ kvm_x86_ops->cache_regs(vcpu, REGS_GPR);
return r;
}
}
@@ -2228,7 +2240,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RSI] += delta;
}
- kvm_x86_ops->decache_regs(vcpu);
+ decache_regs(vcpu, REGS_GPR);
io->count -= io->cur_count;
io->cur_count = 0;
@@ -2302,7 +2314,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
handler);
- kvm_x86_ops->cache_regs(vcpu);
+ cache_regs(vcpu, REGS_GPR);
memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2488,7 +2500,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int r = 1;
- kvm_x86_ops->cache_regs(vcpu);
+ cache_regs(vcpu, REGS_GPR);
nr = vcpu->arch.regs[VCPU_REGS_RAX];
a0 = vcpu->arch.regs[VCPU_REGS_RBX];
@@ -2518,7 +2530,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
break;
}
vcpu->arch.regs[VCPU_REGS_RAX] = ret;
- kvm_x86_ops->decache_regs(vcpu);
+ decache_regs(vcpu, REGS_GPR);
++vcpu->stat.hypercalls;
return r;
}
@@ -2537,7 +2549,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
*/
kvm_mmu_zap_all(vcpu->kvm);
- kvm_x86_ops->cache_regs(vcpu);
+ cache_all_regs(vcpu);
kvm_x86_ops->patch_hypercall(vcpu, instruction);
if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
!= X86EMUL_CONTINUE)
@@ -2669,7 +2681,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
u32 function, index;
struct kvm_cpuid_entry2 *e, *best;
- kvm_x86_ops->cache_regs(vcpu);
+ cache_regs(vcpu, REGS_GPR);
function = vcpu->arch.regs[VCPU_REGS_RAX];
index = vcpu->arch.regs[VCPU_REGS_RCX];
vcpu->arch.regs[VCPU_REGS_RAX] = 0;
@@ -2698,7 +2710,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
}
- kvm_x86_ops->decache_regs(vcpu);
+ decache_regs(vcpu, REGS_GPR);
kvm_x86_ops->skip_emulated_instruction(vcpu);
KVMTRACE_5D(CPUID, vcpu, function,
(u32)vcpu->arch.regs[VCPU_REGS_RAX],
@@ -2811,6 +2823,8 @@ again:
}
}
+ flush_regs(vcpu);
+
clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
kvm_inject_pending_timer_irqs(vcpu);
@@ -2865,6 +2879,8 @@ again:
local_irq_enable();
++vcpu->stat.exits;
+ vcpu->arch.regs_available = 0;
+ vcpu->arch.regs_dirty = 0;
/*
* We must have an instruction between local_irq_enable() and
@@ -2884,7 +2900,7 @@ again:
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
- kvm_x86_ops->cache_regs(vcpu);
+ cache_regs(vcpu, REGS_RIP);
profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
}
@@ -2969,9 +2985,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
#endif
if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
- kvm_x86_ops->cache_regs(vcpu);
+ cache_regs(vcpu, REGS_GPR);
vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
- kvm_x86_ops->decache_regs(vcpu);
+ decache_regs(vcpu, REGS_GPR);
}
r = __vcpu_run(vcpu, kvm_run);
@@ -2988,7 +3004,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
+ cache_all_regs(vcpu);
regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
@@ -3049,7 +3065,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.rip = regs->rip;
kvm_x86_ops->set_rflags(vcpu, regs->rflags);
- kvm_x86_ops->decache_regs(vcpu);
+ decache_all_regs(vcpu);
vcpu->arch.exception.pending = false;
@@ -3525,8 +3541,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
}
+ cache_all_regs(vcpu);
kvm_x86_ops->skip_emulated_instruction(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
if (nseg_desc.type & 8)
ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
@@ -3551,7 +3567,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
tr_seg.type = 11;
kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
- kvm_x86_ops->decache_regs(vcpu);
+ decache_all_regs(vcpu);
return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 851184d..56f01cc 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -120,6 +120,12 @@ enum {
VCPU_SREG_LDTR,
};
+enum kvm_reg_set {
+ REGS_GPR,
+ REGS_RSP,
+ REGS_RIP,
+};
+
#include <asm/kvm_x86_emulate.h>
#define KVM_NR_MEM_OBJS 40
@@ -217,8 +223,10 @@ struct kvm_vcpu_arch {
int interrupt_window_open;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
- unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
- unsigned long rip; /* needs vcpu_load_rsp_rip() */
+ unsigned long regs[NR_VCPU_REGS]; /* needs cache_regs() */
+ unsigned long rip; /* needs cache_regs() */
+ u16 regs_available;
+ u16 regs_dirty;
unsigned long cr0;
unsigned long cr2;
@@ -410,8 +418,8 @@ struct kvm_x86_ops {
unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
int *exception);
- void (*cache_regs)(struct kvm_vcpu *vcpu);
- void (*decache_regs)(struct kvm_vcpu *vcpu);
+ void (*cache_regs)(struct kvm_vcpu *vcpu, enum kvm_reg_set regs);
+ void (*decache_regs)(struct kvm_vcpu *vcpu, enum kvm_reg_set regs);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
--- /dev/null 2008-06-20 14:49:09.418709161 -0300
+++ b/arch/x86/kvm/kvm_cache_regs.h 2008-06-21 14:12:49.000000000 -0300
@@ -0,0 +1,27 @@
+#ifndef ASM_KVM_CACHE_REGS_H
+#define ASM_KVM_CACHE_REGS_H
+static inline void cache_regs(struct kvm_vcpu *vcpu, enum kvm_reg_set reg_set)
+{
+ if (!__test_and_set_bit(reg_set, &vcpu->arch.regs_available))
+ kvm_x86_ops->cache_regs(vcpu, reg_set);
+}
+
+static inline void decache_regs(struct kvm_vcpu *vcpu, enum kvm_reg_set reg_set)
+{
+ __set_bit(reg_set, &vcpu->arch.regs_dirty);
+}
+
+static inline void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+ cache_regs(vcpu, REGS_RSP);
+ cache_regs(vcpu, REGS_RIP);
+ cache_regs(vcpu, REGS_GPR);
+}
+
+static inline void decache_all_regs(struct kvm_vcpu *vcpu)
+{
+ decache_regs(vcpu, REGS_RSP);
+ decache_regs(vcpu, REGS_RIP);
+ decache_regs(vcpu, REGS_GPR);
+}
+#endif
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-21 19:46 RFC: cache_regs in kvm_emulate_pio Marcelo Tosatti
@ 2008-06-22 5:16 ` Avi Kivity
2008-06-22 18:05 ` Marcelo Tosatti
2008-06-24 19:33 ` Marcelo Tosatti
0 siblings, 2 replies; 12+ messages in thread
From: Avi Kivity @ 2008-06-22 5:16 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm-devel
Marcelo Tosatti wrote:
> On Sat, Jun 21, 2008 at 10:04:18AM +0300, Avi Kivity wrote:
>
>>> /*
>>> * Sync the rsp and rip registers into the vcpu structure. This allows
>>> * registers to be accessed by indexing vcpu->arch.regs.
>>> */
>>>
>>> But I think it just refers to the interface in general, so that nobody
>>> would try to access RSP or RIP (and RAX in AMD's case) before calling
>>> ->cache_regs().
>>>
>>>
>> It refers to the fact that sometimes you don't know which registers you
>> refer to, e.g. in the emulator.
>>
>
> How's this?
>
>
Looks good, but we can aim higher. The cache_regs() API was always
confusing (I usually swap the two parts). If we replace all ->regs
access with accessors, we can make it completely transparent.
It will be tricky in the emulator, but worthwhile, no?
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-22 5:16 ` Avi Kivity
@ 2008-06-22 18:05 ` Marcelo Tosatti
2008-06-24 19:33 ` Marcelo Tosatti
1 sibling, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-22 18:05 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Sun, Jun 22, 2008 at 08:16:19AM +0300, Avi Kivity wrote:
> Looks good, but we can aim higher. The cache_regs() API was always
> confusing (I usually swap the two parts). If we replace all ->regs
> access with accessors, we can make it completely transparent.
>
> It will be tricky in the emulator, but worthwhile, no?
Yes, agree. Will go for accessors.
Thanks.
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-22 5:16 ` Avi Kivity
2008-06-22 18:05 ` Marcelo Tosatti
@ 2008-06-24 19:33 ` Marcelo Tosatti
2008-06-26 9:18 ` Avi Kivity
1 sibling, 1 reply; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-24 19:33 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Sun, Jun 22, 2008 at 08:16:19AM +0300, Avi Kivity wrote:
> Marcelo Tosatti wrote:
>> On Sat, Jun 21, 2008 at 10:04:18AM +0300, Avi Kivity wrote:
>>
>>>> /*
>>>> * Sync the rsp and rip registers into the vcpu structure. This allows
>>>> * registers to be accessed by indexing vcpu->arch.regs.
>>>> */
>>>>
>>>> But I think it just refers to the interface in general, so that nobody
>>>> would try to access RSP or RIP (and RAX in AMD's case) before calling
>>>> ->cache_regs().
>>>>
>>> It refers to the fact that sometimes you don't know which registers
>>> you refer to, e.g. in the emulator.
>>>
>>
>> How's this?
>>
>>
>
> Looks good, but we can aim higher. The cache_regs() API was always
> confusing (I usually swap the two parts). If we replace all ->regs
> access with accessors, we can make it completely transparent.
>
> It will be tricky in the emulator, but worthwhile, no?
OK, in the emulator an interface on top of guest_register_write() is
needed to save registers so that the original contents can be restored
on failure. Some brave soul can do it later, so I added a TODO in x86.c.
Smells better now?
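Something along these lines could do it, assuming a hypothetical
regs_touched bitmask in struct decode_cache (illustrative only, not part of
the patch below):
/* commit only the registers the emulator actually touched, instead of
 * memcpy()ing the whole shadow c->regs array back; on failure this is
 * simply not called, so the vcpu registers keep their original contents */
static void emulator_commit_regs(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	enum kvm_reg reg;

	for (reg = 0; reg < NR_VCPU_REGS; reg++)
		if (c->regs_touched & (1UL << reg))	/* hypothetical field */
			guest_register_write(ctxt->vcpu, reg, c->regs[reg]);
}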
--- dev/null 2008-06-24 14:36:42.383774904 -0300
+++ b/arch/x86/kvm/kvm_cache_regs.h 2008-06-24 15:26:02.000000000 -0300
@@ -0,0 +1,21 @@
+#ifndef ASM_KVM_CACHE_REGS_H
+#define ASM_KVM_CACHE_REGS_H
+
+static inline unsigned long guest_register_read(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ if (!__test_and_set_bit(reg, &vcpu->arch.regs_available))
+ kvm_x86_ops->cache_regs(vcpu, reg);
+
+ return vcpu->arch.regs[reg];
+}
+
+static inline void guest_register_write(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg,
+ unsigned long val)
+{
+ vcpu->arch.regs[reg] = val;
+ __set_bit(reg, &vcpu->arch.regs_dirty);
+}
+
+#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 73f43de..97919b6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -32,6 +32,7 @@
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
+#include "kvm_cache_regs.h"
#include "irq.h"
#define PRId64 "d"
@@ -558,8 +559,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
struct kvm_run *run = vcpu->run;
set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
- kvm_x86_ops->cache_regs(vcpu);
- run->tpr_access.rip = vcpu->arch.rip;
+ run->tpr_access.rip = guest_register_read(vcpu, VCPU_REGS_RIP);
run->tpr_access.is_write = write;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 238e8f3..acd96f6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -18,6 +18,7 @@
#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
+#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -241,7 +242,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm->vmcb->save.rip,
svm->next_rip);
- vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
+ svm->vmcb->save.rip = svm->next_rip;
+ guest_register_write(vcpu, VCPU_REGS_RIP, svm->vmcb->save.rip);
svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
vcpu->arch.interrupt_window_open = 1;
@@ -709,21 +711,42 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
rdtscll(vcpu->arch.host_tsc);
}
-static void svm_cache_regs(struct kvm_vcpu *vcpu)
+static void svm_cache_regs(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
struct vcpu_svm *svm = to_svm(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
- vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
- vcpu->arch.rip = svm->vmcb->save.rip;
+ switch (reg) {
+ case VCPU_REGS_RAX:
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+ break;
+ case VCPU_REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+ break;
+ case VCPU_REGS_RIP:
+ vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+ break;
+ default:
+ break;
+ }
}
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
+static void svm_decache_regs(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.rip;
+
+ switch (reg) {
+ case VCPU_REGS_RAX:
+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+ break;
+ case VCPU_REGS_RSP:
+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+ break;
+ case VCPU_REGS_RIP:
+ svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+ break;
+ default:
+ break;
+ }
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e4278d..240f16a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"
#include <asm/io.h>
#include <asm/desc.h>
@@ -707,9 +708,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
unsigned long rip;
u32 interruptibility;
- rip = vmcs_readl(GUEST_RIP);
+ rip = guest_register_read(vcpu, VCPU_REGS_RIP);
rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- vmcs_writel(GUEST_RIP, rip);
+ guest_register_write(vcpu, VCPU_REGS_RIP, rip);
/*
* We emulated an instruction, so temporary interrupt blocking
@@ -931,24 +932,32 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
return ret;
}
-/*
- * Sync the rsp and rip registers into the vcpu structure. This allows
- * registers to be accessed by indexing vcpu->arch.regs.
- */
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_cache_regs(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
- vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
- vcpu->arch.rip = vmcs_readl(GUEST_RIP);
+ switch (reg) {
+ case VCPU_REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+ break;
+ case VCPU_REGS_RIP:
+ vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+ break;
+ default:
+ break;
+ }
}
-/*
- * Syncs rsp and rip back into the vmcs. Should be called after possible
- * modification.
- */
-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_decache_regs(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
- vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
- vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+ switch (reg) {
+ case VCPU_REGS_RSP:
+ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+ break;
+ case VCPU_REGS_RIP:
+ vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ break;
+ default:
+ break;
+ }
}
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -2370,22 +2379,18 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
(u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
switch (cr) {
case 0:
- vcpu_load_rsp_rip(vcpu);
kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 3:
- vcpu_load_rsp_rip(vcpu);
kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 4:
- vcpu_load_rsp_rip(vcpu);
kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 8:
- vcpu_load_rsp_rip(vcpu);
kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
@@ -2395,7 +2400,6 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
};
break;
case 2: /* clts */
- vcpu_load_rsp_rip(vcpu);
vmx_fpu_deactivate(vcpu);
vcpu->arch.cr0 &= ~X86_CR0_TS;
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
@@ -2406,9 +2410,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
case 1: /*mov from cr*/
switch (cr) {
case 3:
- vcpu_load_rsp_rip(vcpu);
vcpu->arch.regs[reg] = vcpu->arch.cr3;
- vcpu_put_rsp_rip(vcpu);
KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
(u32)vcpu->arch.regs[reg],
(u32)((u64)vcpu->arch.regs[reg] >> 32),
@@ -2416,9 +2418,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
skip_emulated_instruction(vcpu);
return 1;
case 8:
- vcpu_load_rsp_rip(vcpu);
vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
- vcpu_put_rsp_rip(vcpu);
KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
(u32)vcpu->arch.regs[reg], handler);
skip_emulated_instruction(vcpu);
@@ -2452,7 +2452,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & 7;
reg = (exit_qualification >> 8) & 15;
- vcpu_load_rsp_rip(vcpu);
if (exit_qualification & 16) {
/* mov from dr */
switch (dr) {
@@ -2465,12 +2464,11 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
default:
val = 0;
}
- vcpu->arch.regs[reg] = val;
+ guest_register_write(vcpu, reg, val);
KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
} else {
/* mov to dr */
}
- vcpu_put_rsp_rip(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
@@ -3213,8 +3211,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
- .cache_regs = vcpu_load_rsp_rip,
- .decache_regs = vcpu_put_rsp_rip,
+ .cache_regs = vmx_cache_regs,
+ .decache_regs = vmx_decache_regs,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 26b051b..9495dd4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -19,6 +19,7 @@
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
+#include "kvm_cache_regs.h"
#include <linux/clocksource.h>
#include <linux/kvm.h>
@@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
struct kvm_x86_ops *kvm_x86_ops;
+EXPORT_SYMBOL(kvm_x86_ops);
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -1778,6 +1780,16 @@ static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
return dev;
}
+static void flush_regs(struct kvm_vcpu *vcpu)
+{
+ if (__test_and_clear_bit(VCPU_REGS_RSP, &vcpu->arch.regs_dirty))
+ kvm_x86_ops->decache_regs(vcpu, VCPU_REGS_RSP);
+ if (__test_and_clear_bit(VCPU_REGS_RIP, &vcpu->arch.regs_dirty))
+ kvm_x86_ops->decache_regs(vcpu, VCPU_REGS_RIP);
+ if (__test_and_clear_bit(VCPU_REGS_RAX, &vcpu->arch.regs_dirty))
+ kvm_x86_ops->decache_regs(vcpu, VCPU_REGS_RAX);
+}
+
int emulator_read_std(unsigned long addr,
void *val,
unsigned int bytes,
@@ -2028,7 +2040,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
u8 opcodes[4];
- unsigned long rip = vcpu->arch.rip;
+ unsigned long rip = guest_register_read(vcpu, VCPU_REGS_RIP);
unsigned long rip_linear;
if (!printk_ratelimit())
@@ -2050,6 +2062,23 @@ static struct x86_emulate_ops emulate_ops = {
.cmpxchg_emulated = emulator_cmpxchg_emulated,
};
+void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->cache_regs(vcpu, VCPU_REGS_RAX);
+ kvm_x86_ops->cache_regs(vcpu, VCPU_REGS_RSP);
+ kvm_x86_ops->cache_regs(vcpu, VCPU_REGS_RIP);
+}
+
+void decache_all_regs(struct kvm_vcpu *vcpu)
+{
+ guest_register_write(vcpu, VCPU_REGS_RAX,
+ vcpu->arch.regs[VCPU_REGS_RAX]);
+ guest_register_write(vcpu, VCPU_REGS_RSP,
+ vcpu->arch.regs[VCPU_REGS_RSP]);
+ guest_register_write(vcpu, VCPU_REGS_RIP,
+ vcpu->arch.regs[VCPU_REGS_RIP]);
+}
+
int emulate_instruction(struct kvm_vcpu *vcpu,
struct kvm_run *run,
unsigned long cr2,
@@ -2060,7 +2089,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
struct decode_cache *c;
vcpu->arch.mmio_fault_cr2 = cr2;
- kvm_x86_ops->cache_regs(vcpu);
+ /*
+ * TODO: fix x86_emulate.c to use guest_register_read/write
+ * instead of direct ->regs accesses; can save hundreds of cycles
+ * on Intel for instructions that don't read/change RSP, for
+ * example.
+ */
+ cache_all_regs(vcpu);
vcpu->mmio_is_write = 0;
vcpu->arch.pio.string = 0;
@@ -2141,7 +2176,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
- kvm_x86_ops->decache_regs(vcpu);
+ decache_all_regs(vcpu);
kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
if (vcpu->mmio_is_write) {
@@ -2194,18 +2229,19 @@ int complete_pio(struct kvm_vcpu *vcpu)
struct kvm_pio_request *io = &vcpu->arch.pio;
long delta;
int r;
-
- kvm_x86_ops->cache_regs(vcpu);
+ unsigned long val;
if (!io->string) {
- if (io->in)
- memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
- io->size);
+ if (io->in) {
+ val = guest_register_read(vcpu, VCPU_REGS_RAX);
+ memcpy(&val, vcpu->arch.pio_data, io->size);
+ guest_register_write(vcpu, VCPU_REGS_RAX, val);
+ }
} else {
if (io->in) {
r = pio_copy_data(vcpu);
if (r) {
- kvm_x86_ops->cache_regs(vcpu);
+ kvm_x86_ops->cache_regs(vcpu, VCPU_REGS_RAX);
return r;
}
}
@@ -2217,19 +2253,24 @@ int complete_pio(struct kvm_vcpu *vcpu)
* The size of the register should really depend on
* current address size.
*/
- vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
+ val = guest_register_read(vcpu, VCPU_REGS_RCX);
+ val -= delta;
+ guest_register_write(vcpu, VCPU_REGS_RCX, val);
}
if (io->down)
delta = -delta;
delta *= io->size;
- if (io->in)
- vcpu->arch.regs[VCPU_REGS_RDI] += delta;
- else
- vcpu->arch.regs[VCPU_REGS_RSI] += delta;
+ if (io->in) {
+ val = guest_register_read(vcpu, VCPU_REGS_RDI);
+ val += delta;
+ guest_register_write(vcpu, VCPU_REGS_RDI, val);
+ } else {
+ val = guest_register_read(vcpu, VCPU_REGS_RSI);
+ val += delta;
+ guest_register_write(vcpu, VCPU_REGS_RSI, val);
+ }
}
- kvm_x86_ops->decache_regs(vcpu);
-
io->count -= io->cur_count;
io->cur_count = 0;
@@ -2282,6 +2323,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned port)
{
struct kvm_io_device *pio_dev;
+ unsigned long val;
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2302,8 +2344,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
handler);
- kvm_x86_ops->cache_regs(vcpu);
- memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+ val = guest_register_read(vcpu, VCPU_REGS_RAX);
+ memcpy(vcpu->arch.pio_data, &val, 4);
kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2488,13 +2530,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int r = 1;
- kvm_x86_ops->cache_regs(vcpu);
-
- nr = vcpu->arch.regs[VCPU_REGS_RAX];
- a0 = vcpu->arch.regs[VCPU_REGS_RBX];
- a1 = vcpu->arch.regs[VCPU_REGS_RCX];
- a2 = vcpu->arch.regs[VCPU_REGS_RDX];
- a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+ nr = guest_register_read(vcpu, VCPU_REGS_RAX);
+ a0 = guest_register_read(vcpu, VCPU_REGS_RBX);
+ a1 = guest_register_read(vcpu, VCPU_REGS_RCX);
+ a2 = guest_register_read(vcpu, VCPU_REGS_RDX);
+ a3 = guest_register_read(vcpu, VCPU_REGS_RSI);
KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
@@ -2517,8 +2557,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
ret = -KVM_ENOSYS;
break;
}
- vcpu->arch.regs[VCPU_REGS_RAX] = ret;
- kvm_x86_ops->decache_regs(vcpu);
+ guest_register_write(vcpu, VCPU_REGS_RAX, ret);
++vcpu->stat.hypercalls;
return r;
}
@@ -2528,6 +2567,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
char instruction[3];
int ret = 0;
+ unsigned long rip = guest_register_read(vcpu, VCPU_REGS_RIP);
/*
@@ -2537,9 +2577,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
*/
kvm_mmu_zap_all(vcpu->kvm);
- kvm_x86_ops->cache_regs(vcpu);
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
+ if (emulator_write_emulated(rip, instruction, 3, vcpu)
!= X86EMUL_CONTINUE)
ret = -EFAULT;
@@ -2669,13 +2708,12 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
u32 function, index;
struct kvm_cpuid_entry2 *e, *best;
- kvm_x86_ops->cache_regs(vcpu);
- function = vcpu->arch.regs[VCPU_REGS_RAX];
- index = vcpu->arch.regs[VCPU_REGS_RCX];
- vcpu->arch.regs[VCPU_REGS_RAX] = 0;
- vcpu->arch.regs[VCPU_REGS_RBX] = 0;
- vcpu->arch.regs[VCPU_REGS_RCX] = 0;
- vcpu->arch.regs[VCPU_REGS_RDX] = 0;
+ function = guest_register_read(vcpu, VCPU_REGS_RAX);
+ index = guest_register_read(vcpu, VCPU_REGS_RCX);
+ guest_register_write(vcpu, VCPU_REGS_RAX, 0);
+ guest_register_write(vcpu, VCPU_REGS_RBX, 0);
+ guest_register_write(vcpu, VCPU_REGS_RCX, 0);
+ guest_register_write(vcpu, VCPU_REGS_RDX, 0);
best = NULL;
for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
e = &vcpu->arch.cpuid_entries[i];
@@ -2693,12 +2731,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
best = e;
}
if (best) {
- vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
- vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
- vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
- vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
+ guest_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+ guest_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+ guest_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+ guest_register_write(vcpu, VCPU_REGS_RDX, best->edx);
}
- kvm_x86_ops->decache_regs(vcpu);
kvm_x86_ops->skip_emulated_instruction(vcpu);
KVMTRACE_5D(CPUID, vcpu, function,
(u32)vcpu->arch.regs[VCPU_REGS_RAX],
@@ -2811,6 +2848,8 @@ again:
}
}
+ flush_regs(vcpu);
+
clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
kvm_inject_pending_timer_irqs(vcpu);
@@ -2865,6 +2904,8 @@ again:
local_irq_enable();
++vcpu->stat.exits;
+ vcpu->arch.regs_available = KVM_CACHED_REGS;
+ vcpu->arch.regs_dirty = 0;
/*
* We must have an instruction between local_irq_enable() and
@@ -2884,8 +2925,8 @@ again:
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
- kvm_x86_ops->cache_regs(vcpu);
- profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
+ unsigned long rip = guest_register_read(vcpu, VCPU_REGS_RIP);
+ profile_hit(KVM_PROFILING, (void *)rip);
}
if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
@@ -2968,11 +3009,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
}
#endif
- if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
- kvm_x86_ops->cache_regs(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
- kvm_x86_ops->decache_regs(vcpu);
- }
+ if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
+ guest_register_write(vcpu, VCPU_REGS_RAX,
+ kvm_run->hypercall.ret);
r = __vcpu_run(vcpu, kvm_run);
@@ -2988,28 +3027,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
-
- regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
- regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
- regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
- regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
- regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
- regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
- regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
+ regs->rax = guest_register_read(vcpu, VCPU_REGS_RAX);
+ regs->rbx = guest_register_read(vcpu, VCPU_REGS_RBX);
+ regs->rcx = guest_register_read(vcpu, VCPU_REGS_RCX);
+ regs->rdx = guest_register_read(vcpu, VCPU_REGS_RDX);
+ regs->rsi = guest_register_read(vcpu, VCPU_REGS_RSI);
+ regs->rdi = guest_register_read(vcpu, VCPU_REGS_RDI);
+ regs->rsp = guest_register_read(vcpu, VCPU_REGS_RSP);
+ regs->rbp = guest_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
- regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
- regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
- regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
- regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
- regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
- regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
- regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
- regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+ regs->r8 = guest_register_read(vcpu, VCPU_REGS_R8);
+ regs->r9 = guest_register_read(vcpu, VCPU_REGS_R9);
+ regs->r10 = guest_register_read(vcpu, VCPU_REGS_R10);
+ regs->r11 = guest_register_read(vcpu, VCPU_REGS_R11);
+ regs->r12 = guest_register_read(vcpu, VCPU_REGS_R12);
+ regs->r13 = guest_register_read(vcpu, VCPU_REGS_R13);
+ regs->r14 = guest_register_read(vcpu, VCPU_REGS_R14);
+ regs->r15 = guest_register_read(vcpu, VCPU_REGS_R15);
#endif
- regs->rip = vcpu->arch.rip;
+ regs->rip = guest_register_read(vcpu, VCPU_REGS_RIP);
regs->rflags = kvm_x86_ops->get_rflags(vcpu);
/*
@@ -3027,29 +3064,29 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
- vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
- vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
- vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
- vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
- vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
- vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
- vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
+ guest_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
+ guest_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
+ guest_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
+ guest_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
+ guest_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
+ guest_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
+ guest_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+ guest_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
- vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
- vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
- vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
- vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
- vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
- vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
- vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
- vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
+ guest_register_write(vcpu, VCPU_REGS_R8, regs->r8);
+ guest_register_write(vcpu, VCPU_REGS_R9, regs->r9);
+ guest_register_write(vcpu, VCPU_REGS_R10, regs->r10);
+ guest_register_write(vcpu, VCPU_REGS_R11, regs->r11);
+ guest_register_write(vcpu, VCPU_REGS_R12, regs->r12);
+ guest_register_write(vcpu, VCPU_REGS_R13, regs->r13);
+ guest_register_write(vcpu, VCPU_REGS_R14, regs->r14);
+ guest_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+
#endif
- vcpu->arch.rip = regs->rip;
+ guest_register_write(vcpu, VCPU_REGS_RIP, regs->rip);
kvm_x86_ops->set_rflags(vcpu, regs->rflags);
- kvm_x86_ops->decache_regs(vcpu);
vcpu->arch.exception.pending = false;
@@ -3323,17 +3360,17 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
struct tss_segment_32 *tss)
{
tss->cr3 = vcpu->arch.cr3;
- tss->eip = vcpu->arch.rip;
+ tss->eip = guest_register_read(vcpu, VCPU_REGS_RIP);
tss->eflags = kvm_x86_ops->get_rflags(vcpu);
- tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
- tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
- tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
- tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
- tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
- tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
- tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
-
+ tss->eax = guest_register_read(vcpu, VCPU_REGS_RAX);
+ tss->ecx = guest_register_read(vcpu, VCPU_REGS_RCX);
+ tss->edx = guest_register_read(vcpu, VCPU_REGS_RDX);
+ tss->ebx = guest_register_read(vcpu, VCPU_REGS_RBX);
+ tss->esp = guest_register_read(vcpu, VCPU_REGS_RSP);
+ tss->ebp = guest_register_read(vcpu, VCPU_REGS_RBP);
+ tss->esi = guest_register_read(vcpu, VCPU_REGS_RSI);
+ tss->edi = guest_register_read(vcpu, VCPU_REGS_RDI);
tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
@@ -3349,17 +3386,17 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
{
kvm_set_cr3(vcpu, tss->cr3);
- vcpu->arch.rip = tss->eip;
+ guest_register_write(vcpu, VCPU_REGS_RIP, tss->eip);
kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
- vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
- vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
- vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
- vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
- vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
- vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
- vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
- vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+ guest_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
+ guest_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
+ guest_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
+ guest_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
+ guest_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
+ guest_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
+ guest_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
+ guest_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
return 1;
@@ -3387,16 +3424,16 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
static void save_state_to_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
- tss->ip = vcpu->arch.rip;
+ tss->ip = guest_register_read(vcpu, VCPU_REGS_RIP);
tss->flag = kvm_x86_ops->get_rflags(vcpu);
- tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
- tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
- tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
- tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
- tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
- tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
- tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
- tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+ tss->ax = guest_register_read(vcpu, VCPU_REGS_RAX);
+ tss->cx = guest_register_read(vcpu, VCPU_REGS_RCX);
+ tss->dx = guest_register_read(vcpu, VCPU_REGS_RDX);
+ tss->bx = guest_register_read(vcpu, VCPU_REGS_RBX);
+ tss->sp = guest_register_read(vcpu, VCPU_REGS_RSP);
+ tss->bp = guest_register_read(vcpu, VCPU_REGS_RBP);
+ tss->si = guest_register_read(vcpu, VCPU_REGS_RSI);
+ tss->di = guest_register_read(vcpu, VCPU_REGS_RDI);
tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
@@ -3409,16 +3446,16 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu,
static int load_state_from_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
- vcpu->arch.rip = tss->ip;
+ guest_register_write(vcpu, VCPU_REGS_RIP, tss->ip);
kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
- vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
- vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
- vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
- vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
- vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
- vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
- vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
- vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+ guest_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
+ guest_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
+ guest_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
+ guest_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
+ guest_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
+ guest_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
+ guest_register_write(vcpu, VCPU_REGS_RSI, tss->si);
+ guest_register_write(vcpu, VCPU_REGS_RDI, tss->di);
if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
return 1;
@@ -3526,7 +3563,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
}
kvm_x86_ops->skip_emulated_instruction(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
if (nseg_desc.type & 8)
ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
@@ -3551,7 +3587,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
tr_seg.type = 11;
kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
- kvm_x86_ops->decache_regs(vcpu);
return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 38926b7..c74b9d9 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -26,6 +26,7 @@
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
@@ -806,7 +807,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
/* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache));
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = guest_register_read(ctxt->vcpu, VCPU_REGS_RIP);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
switch (mode) {
@@ -1245,7 +1246,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
if (c->rep_prefix && (c->d & String)) {
/* All REP prefixes have the same first termination condition */
if (c->regs[VCPU_REGS_RCX] == 0) {
- ctxt->vcpu->arch.rip = c->eip;
+ guest_register_write(ctxt->vcpu, VCPU_REGS_RIP, c->eip);
goto done;
}
/* The second termination condition only applies for REPE
@@ -1259,17 +1260,20 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
(c->b == 0xae) || (c->b == 0xaf)) {
if ((c->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0)) {
- ctxt->vcpu->arch.rip = c->eip;
+ guest_register_write(ctxt->vcpu,
+ VCPU_REGS_RIP,
+ c->eip);
goto done;
}
if ((c->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
- ctxt->vcpu->arch.rip = c->eip;
+ guest_register_write(ctxt->vcpu, VCPU_REGS_RIP,
+ c->eip);
goto done;
}
}
c->regs[VCPU_REGS_RCX]--;
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = guest_register_read(ctxt->vcpu, VCPU_REGS_RIP);
}
if (c->src.type == OP_MEM) {
@@ -1750,7 +1754,7 @@ writeback:
/* Commit shadow register state. */
memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
- ctxt->vcpu->arch.rip = c->eip;
+ guest_register_write(ctxt->vcpu, VCPU_REGS_RIP, c->eip);
done:
if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1775,7 +1779,7 @@ twobyte_insn:
goto done;
/* Let the processor re-execute the fixed hypercall */
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = guest_register_read(ctxt->vcpu, VCPU_REGS_RIP);
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
@@ -1871,7 +1875,7 @@ twobyte_insn:
rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
if (rc) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = guest_register_read(ctxt->vcpu, VCPU_REGS_RIP);
}
rc = X86EMUL_CONTINUE;
c->dst.type = OP_NONE;
@@ -1881,7 +1885,7 @@ twobyte_insn:
rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
if (rc) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = guest_register_read(ctxt->vcpu, VCPU_REGS_RIP);
} else {
c->regs[VCPU_REGS_RAX] = (u32)msr_data;
c->regs[VCPU_REGS_RDX] = msr_data >> 32;
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 851184d..cc5c94b 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -87,7 +87,7 @@ extern struct list_head vm_list;
struct kvm_vcpu;
struct kvm;
-enum {
+enum kvm_reg {
VCPU_REGS_RAX = 0,
VCPU_REGS_RCX = 1,
VCPU_REGS_RDX = 2,
@@ -106,9 +106,21 @@ enum {
VCPU_REGS_R14 = 14,
VCPU_REGS_R15 = 15,
#endif
+ VCPU_REGS_RIP = 16,
NR_VCPU_REGS
};
+/*
+ * List of registers already read by kvm_x86_ops->run().
+ */
+#define KVM_CACHED_REGS ((1 << VCPU_REGS_RCX) | (1 << VCPU_REGS_RDX) | \
+ (1 << VCPU_REGS_RBX) | (1 << VCPU_REGS_RBP) | \
+ (1 << VCPU_REGS_RSI) | (1 << VCPU_REGS_RDI) | \
+ (1 << VCPU_REGS_R8) | (1 << VCPU_REGS_R9) | \
+ (1 << VCPU_REGS_R10) | (1 << VCPU_REGS_R11) | \
+ (1 << VCPU_REGS_R12) | (1 << VCPU_REGS_R13) | \
+ (1 << VCPU_REGS_R14) | (1 << VCPU_REGS_R15))
+
enum {
VCPU_SREG_ES,
VCPU_SREG_CS,
@@ -217,8 +229,9 @@ struct kvm_vcpu_arch {
int interrupt_window_open;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
- unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
- unsigned long rip; /* needs vcpu_load_rsp_rip() */
+ unsigned long regs[NR_VCPU_REGS]; /* needs cache_regs() */
+ u32 regs_available;
+ u32 regs_dirty;
unsigned long cr0;
unsigned long cr2;
@@ -410,8 +423,8 @@ struct kvm_x86_ops {
unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
int *exception);
- void (*cache_regs)(struct kvm_vcpu *vcpu);
- void (*decache_regs)(struct kvm_vcpu *vcpu);
+ void (*cache_regs)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ void (*decache_regs)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-24 19:33 ` Marcelo Tosatti
@ 2008-06-26 9:18 ` Avi Kivity
2008-06-26 14:52 ` Marcelo Tosatti
2008-06-26 22:15 ` Marcelo Tosatti
0 siblings, 2 replies; 12+ messages in thread
From: Avi Kivity @ 2008-06-26 9:18 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm-devel
Marcelo Tosatti wrote:
>>>
>>>
>> Looks good, but we can aim higher. The cache_regs() API was always
>> confusing (I usually swap the two parts). If we replace all ->regs
>> access with accessors, we can make it completely transparent.
>>
>> It will be tricky in the emulator, but worthwhile, no?
>>
>
> OK, in the emulator an interface on top of guest_register_write() is
> needed to save registers so that the original contents can be restored
> on failure. Some brave soul can do it later, so I added a TODO in x86.c.
>
> Smells better now?
>
>
> --- dev/null 2008-06-24 14:36:42.383774904 -0300
> +++ b/arch/x86/kvm/kvm_cache_regs.h 2008-06-24 15:26:02.000000000 -0300
> @@ -0,0 +1,21 @@
> +#ifndef ASM_KVM_CACHE_REGS_H
> +#define ASM_KVM_CACHE_REGS_H
> +
> +static inline unsigned long guest_register_read(struct kvm_vcpu *vcpu,
> + enum kvm_reg reg)
> +{
> + if (!__test_and_set_bit(reg, &vcpu->arch.regs_available))
> + kvm_x86_ops->cache_regs(vcpu, reg);
> +
> + return vcpu->arch.regs[reg];
> +}
> +
> +static inline void guest_register_write(struct kvm_vcpu *vcpu,
> + enum kvm_reg reg,
> + unsigned long val)
> +{
> + vcpu->arch.regs[reg] = val;
> + __set_bit(reg, &vcpu->arch.regs_dirty);
> +}
> +
> +#endif
>
A new header file is excessive. Also, these are global names, so please
prefix with kvm_.
> @@ -241,7 +242,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
> svm->vmcb->save.rip,
> svm->next_rip);
>
> - vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
> + svm->vmcb->save.rip = svm->next_rip;
> + guest_register_write(vcpu, VCPU_REGS_RIP, svm->vmcb->save.rip);
> svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
>
No need to write into save.rip, is there?
>
> -static void svm_cache_regs(struct kvm_vcpu *vcpu)
> +static void svm_cache_regs(struct kvm_vcpu *vcpu, enum kvm_reg reg)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
>
> - vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
> - vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
> - vcpu->arch.rip = svm->vmcb->save.rip;
> + switch (reg) {
> + case VCPU_REGS_RAX:
> + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
> + break;
> + case VCPU_REGS_RSP:
> + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
> + break;
> + case VCPU_REGS_RIP:
> + vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
> + break;
> + default:
> + break;
> + }
> }
>
For svm we ought to unconditionally copy all the registers and mark all
registers as available, since it's so cheap. This will avoid some callbacks.
We can even do it on the vmexit path, so there will be no
svm_cache_regs() at all.
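Concretely, a minimal sketch of that vmexit-path version for svm (name illustrative, assuming rax/rsp/rip are the only registers the VMCB holds outside vcpu->arch.regs):

static void svm_sync_regs_from_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Copy the few VMCB-held registers right after #VMEXIT... */
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
	/* ...and mark everything available so no further callbacks fire. */
	vcpu->arch.regs_available = ~0U;
	vcpu->arch.regs_dirty = 0;
}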
> @@ -707,9 +708,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
> unsigned long rip;
> u32 interruptibility;
>
> - rip = vmcs_readl(GUEST_RIP);
> + rip = guest_register_read(vcpu, VCPU_REGS_RIP);
>
Perhaps we ought to have a guest_rip_read() since rip is not truly a GPR.
>
> static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
> @@ -2370,22 +2379,18 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
> (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
> switch (cr) {
> case 0:
> - vcpu_load_rsp_rip(vcpu);
> kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
>
What if reg points at rsp? You need to replace arch.regs[*] with the
accessor.
> @@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
> struct kvm_cpuid_entry2 __user *entries);
>
> struct kvm_x86_ops *kvm_x86_ops;
> +EXPORT_SYMBOL(kvm_x86_ops);
>
_GPL
> +static void flush_regs(struct kvm_vcpu *vcpu)
> +{
> + if (__test_and_clear_bit(VCPU_REGS_RSP, &vcpu->arch.regs_dirty))
> + kvm_x86_ops->decache_regs(vcpu, VCPU_REGS_RSP);
> + if (__test_and_clear_bit(VCPU_REGS_RIP, &vcpu->arch.regs_dirty))
> + kvm_x86_ops->decache_regs(vcpu, VCPU_REGS_RIP);
> + if (__test_and_clear_bit(VCPU_REGS_RAX, &vcpu->arch.regs_dirty))
> + kvm_x86_ops->decache_regs(vcpu, VCPU_REGS_RAX);
> +}
>
This is better done in $subarch_vcpu_run, as it avoids callbacks and
knows exactly which regs to look at. We can do it unconditionally for
svm, too.
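A minimal vmx-side sketch of that (function name hypothetical; it would run at the top of vmx_vcpu_run and only touches the two vmcs-held registers):

static void vmx_flush_dirty_regs(struct kvm_vcpu *vcpu)
{
	/* Write back only what was modified since the last exit. */
	if (vcpu->arch.regs_dirty & (1 << VCPU_REGS_RSP))
		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	if (vcpu->arch.regs_dirty & (1 << VCPU_REGS_RIP))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
	vcpu->arch.regs_dirty = 0;
}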
>
> +void cache_all_regs(struct kvm_vcpu *vcpu)
> +{
> + kvm_x86_ops->cache_regs(vcpu, VCPU_REGS_RAX);
> + kvm_x86_ops->cache_regs(vcpu, VCPU_REGS_RSP);
> + kvm_x86_ops->cache_regs(vcpu, VCPU_REGS_RIP);
> +}
> +
> +void decache_all_regs(struct kvm_vcpu *vcpu)
> +{
> + guest_register_write(vcpu, VCPU_REGS_RAX,
> + vcpu->arch.regs[VCPU_REGS_RAX]);
> + guest_register_write(vcpu, VCPU_REGS_RSP,
> + vcpu->arch.regs[VCPU_REGS_RSP]);
> + guest_register_write(vcpu, VCPU_REGS_RIP,
> + vcpu->arch.regs[VCPU_REGS_RIP]);
> +}
> +
>
These should be static.
> @@ -2865,6 +2904,8 @@ again:
> local_irq_enable();
>
> ++vcpu->stat.exits;
> + vcpu->arch.regs_available = KVM_CACHED_REGS;
> + vcpu->arch.regs_dirty = 0;
>
How can you have a constant for this? Each subarch has different cached
regs. This ought to be set in $subarch_vcpu_run.
> diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
> index 851184d..cc5c94b 100644
> --- a/include/asm-x86/kvm_host.h
> +++ b/include/asm-x86/kvm_host.h
> @@ -87,7 +87,7 @@ extern struct list_head vm_list;
> struct kvm_vcpu;
> struct kvm;
>
> -enum {
> +enum kvm_reg {
> VCPU_REGS_RAX = 0,
> VCPU_REGS_RCX = 1,
> VCPU_REGS_RDX = 2,
> @@ -106,9 +106,21 @@ enum {
> VCPU_REGS_R14 = 14,
> VCPU_REGS_R15 = 15,
> #endif
> + VCPU_REGS_RIP = 16,
> NR_VCPU_REGS
> };
>
No, rip is not a GPR.
>
> +/*
> + * List of registers already read by kvm_x86_ops->run().
> + */
> +#define KVM_CACHED_REGS ((1 << VCPU_REGS_RCX) | (1 << VCPU_REGS_RDX) | \
> + (1 << VCPU_REGS_RBX) | (1 << VCPU_REGS_RBP) | \
> + (1 << VCPU_REGS_RSI) | (1 << VCPU_REGS_RDI) | \
> + (1 << VCPU_REGS_R8) | (1 << VCPU_REGS_R9) | \
> + (1 << VCPU_REGS_R10) | (1 << VCPU_REGS_R11) | \
> + (1 << VCPU_REGS_R12) | (1 << VCPU_REGS_R13) | \
> + (1 << VCPU_REGS_R14) | (1 << VCPU_REGS_R15))
> +
>
As mentioned earlier, this is subarch specific (and better written as
~(1 << VCPU_REGS_R?X)).
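That is, each subarch would express the mask as the complement of the few registers it does not save; a vmx-flavoured sketch (macro name hypothetical):

/* On vmx everything except rsp/rip is already in vcpu->arch.regs after an exit. */
#define VMX_REGS_CACHED_ON_EXIT	(~((1U << VCPU_REGS_RSP) | (1U << VCPU_REGS_RIP)))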
> @@ -410,8 +423,8 @@ struct kvm_x86_ops {
> unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
> void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
> int *exception);
> - void (*cache_regs)(struct kvm_vcpu *vcpu);
> - void (*decache_regs)(struct kvm_vcpu *vcpu);
> + void (*cache_regs)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
> + void (*decache_regs)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
> unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
> void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
>
Remove the plural s as the callbacks now affect a single register.
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-26 9:18 ` Avi Kivity
@ 2008-06-26 14:52 ` Marcelo Tosatti
2008-06-26 22:15 ` Marcelo Tosatti
1 sibling, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-26 14:52 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Thu, Jun 26, 2008 at 12:18:07PM +0300, Avi Kivity wrote:
>
>> static int set_guest_debug(struct kvm_vcpu *vcpu, struct
>> kvm_debug_guest *dbg)
>> @@ -2370,22 +2379,18 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>> (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
>> switch (cr) {
>> case 0:
>> - vcpu_load_rsp_rip(vcpu);
>> kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
>>
>
> What if reg points at rsp? You need to replace arch.regs[*] with the
> accessor.
Good catch!
>> @@ -2865,6 +2904,8 @@ again:
>> local_irq_enable();
>> ++vcpu->stat.exits;
>> + vcpu->arch.regs_available = KVM_CACHED_REGS;
>> + vcpu->arch.regs_dirty = 0;
>>
>
> How can you have a constant for this? Each subarch has different cached
> regs. This ought to be set in $subarch_vcpu_run.
This is the intersection of registers cached by both architectures. But
I agree that moving it down to subarch code is saner.
>> diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
>> index 851184d..cc5c94b 100644
>> --- a/include/asm-x86/kvm_host.h
>> +++ b/include/asm-x86/kvm_host.h
>> @@ -87,7 +87,7 @@ extern struct list_head vm_list;
>> struct kvm_vcpu;
>> struct kvm;
>> -enum {
>> +enum kvm_reg {
>> VCPU_REGS_RAX = 0,
>> VCPU_REGS_RCX = 1,
>> VCPU_REGS_RDX = 2,
>> @@ -106,9 +106,21 @@ enum {
>> VCPU_REGS_R14 = 14,
>> VCPU_REGS_R15 = 15,
>> #endif
>> + VCPU_REGS_RIP = 16,
>> NR_VCPU_REGS
>> };
>>
>
> No, rip is not a GPR.
We need the RIP index to be part of the kvm_reg space (to index into the
dirty/available bitmaps). Otherwise you have to special-case it.
Sure, it is not a GPR, but what is the problem with storing RIP in the regs
array instead of a separate variable?
> Perhaps we ought to have a guest_rip_read() since rip is not truly a GPR.
#define kvm_guest_read_rip(vcpu) kvm_guest_register_read(vcpu, VCPU_REGS_RIP)
?
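One sketch of how the two concerns could be reconciled (names illustrative, assuming the availability/dirty bitmaps are simply one bit wider than the GPR array): keep RIP out of the GPR enum but give it the next bitmap index, with a dedicated accessor that stores the value in vcpu->arch.rip:

#define VCPU_REGS_RIP	NR_VCPU_REGS	/* bitmap index only, not a regs[] slot */

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.regs_available & (1U << VCPU_REGS_RIP))) {
		kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RIP);
		vcpu->arch.regs_available |= 1U << VCPU_REGS_RIP;
	}
	return vcpu->arch.rip;
}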
Will fix the remaining comments.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-26 9:18 ` Avi Kivity
2008-06-26 14:52 ` Marcelo Tosatti
@ 2008-06-26 22:15 ` Marcelo Tosatti
2008-06-27 2:28 ` Marcelo Tosatti
1 sibling, 1 reply; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-26 22:15 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Thu, Jun 26, 2008 at 12:18:07PM +0300, Avi Kivity wrote:
> A new header file is excessive. Also, these are global names, so please
> prefix with kvm_.
The reason for a separate header is that these accessors need both
kvm_vcpu (linux/kvm_host.h) and kvm_vcpu_arch (asm/kvm_host.h).
I couldn't think of a better location to put them. Ideas?
>> @@ -241,7 +242,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
>> svm->vmcb->save.rip,
>> svm->next_rip);
>> - vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
>> + svm->vmcb->save.rip = svm->next_rip;
>> + guest_register_write(vcpu, VCPU_REGS_RIP, svm->vmcb->save.rip);
>> svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
>>
>
> No need to write into save.rip, is there?
I'm not sure why it was there in the first place. Perhaps there's code
which reads svm->vmcb->save.rip directly as the current RIP after this
point?
Also "kvm_guest_register_read" is too long, so I dropped "guest".
--- /dev/null 2008-06-26 10:56:31.025001212 -0300
+++ b/arch/x86/kvm/kvm_cache_regs.h 2008-06-26 18:38:48.000000000 -0300
@@ -0,0 +1,36 @@
+#ifndef ASM_KVM_CACHE_REGS_H
+#define ASM_KVM_CACHE_REGS_H
+
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ if (!__test_and_set_bit(reg, &vcpu->arch.regs_available))
+ kvm_x86_ops->cache_reg(vcpu, reg);
+
+ return vcpu->arch.regs[reg];
+}
+
+static inline void kvm_register_write(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg,
+ unsigned long val)
+{
+ vcpu->arch.regs[reg] = val;
+ __set_bit(reg, &vcpu->arch.regs_dirty);
+}
+
+static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
+{
+ if (!__test_and_set_bit(VCPU_REGS_RIP, &vcpu->arch.regs_available))
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RIP);
+
+ return vcpu->arch.rip;
+}
+
+static inline void kvm_rip_write(struct kvm_vcpu *vcpu,
+ unsigned long val)
+{
+ vcpu->arch.rip = val;
+ __set_bit(VCPU_REGS_RIP, &vcpu->arch.regs_dirty);
+}
+
+#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 73f43de..9fde0ac 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -32,6 +32,7 @@
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
+#include "kvm_cache_regs.h"
#include "irq.h"
#define PRId64 "d"
@@ -558,8 +559,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
struct kvm_run *run = vcpu->run;
set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
- kvm_x86_ops->cache_regs(vcpu);
- run->tpr_access.rip = vcpu->arch.rip;
+ run->tpr_access.rip = kvm_rip_read(vcpu);
run->tpr_access.is_write = write;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 238e8f3..532a393 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -18,6 +18,7 @@
#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
+#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -241,7 +242,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm->vmcb->save.rip,
svm->next_rip);
- vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
+ svm->vmcb->save.rip = svm->next_rip;
+ kvm_rip_write(vcpu, svm->vmcb->save.rip);
svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
vcpu->arch.interrupt_window_open = 1;
@@ -709,21 +711,42 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
rdtscll(vcpu->arch.host_tsc);
}
-static void svm_cache_regs(struct kvm_vcpu *vcpu)
+static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
struct vcpu_svm *svm = to_svm(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
- vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
- vcpu->arch.rip = svm->vmcb->save.rip;
+ switch (reg) {
+ case VCPU_REGS_RAX:
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+ break;
+ case VCPU_REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+ break;
+ case VCPU_REGS_RIP:
+ vcpu->arch.rip = svm->vmcb->save.rip;
+ break;
+ default:
+ break;
+ }
}
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
+static void svm_decache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.rip;
+
+ switch (reg) {
+ case VCPU_REGS_RAX:
+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+ break;
+ case VCPU_REGS_RSP:
+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+ break;
+ case VCPU_REGS_RIP:
+ svm->vmcb->save.rip = vcpu->arch.rip;
+ break;
+ default:
+ break;
+ }
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -1688,6 +1711,21 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
+static void svm_flush_regs(struct kvm_vcpu *vcpu)
+{
+ svm_decache_reg(vcpu, VCPU_REGS_RSP);
+ svm_decache_reg(vcpu, VCPU_REGS_RIP);
+ svm_decache_reg(vcpu, VCPU_REGS_RAX);
+}
+
+static void svm_cache_regs(struct kvm_vcpu *vcpu)
+{
+ svm_cache_reg(vcpu, VCPU_REGS_RSP);
+ svm_cache_reg(vcpu, VCPU_REGS_RIP);
+ svm_cache_reg(vcpu, VCPU_REGS_RAX);
+ vcpu->arch.regs_available = ~0U;
+}
+
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1695,6 +1733,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
u16 gs_selector;
u16 ldt_selector;
+ svm_flush_regs(vcpu);
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
@@ -1849,6 +1888,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
+ svm_cache_regs(vcpu);
+ vcpu->arch.regs_dirty = 0;
}
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -1949,8 +1990,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_gdt = svm_set_gdt,
.get_dr = svm_get_dr,
.set_dr = svm_set_dr,
- .cache_regs = svm_cache_regs,
- .decache_regs = svm_decache_regs,
+ .cache_reg = svm_cache_reg,
+ .decache_reg = svm_decache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e4278d..533d530 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"
#include <asm/io.h>
#include <asm/desc.h>
@@ -707,9 +708,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
unsigned long rip;
u32 interruptibility;
- rip = vmcs_readl(GUEST_RIP);
+ rip = kvm_rip_read(vcpu);
rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- vmcs_writel(GUEST_RIP, rip);
+ kvm_rip_write(vcpu, rip);
/*
* We emulated an instruction, so temporary interrupt blocking
@@ -931,24 +932,32 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
return ret;
}
-/*
- * Sync the rsp and rip registers into the vcpu structure. This allows
- * registers to be accessed by indexing vcpu->arch.regs.
- */
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
- vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
- vcpu->arch.rip = vmcs_readl(GUEST_RIP);
+ switch (reg) {
+ case VCPU_REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+ break;
+ case VCPU_REGS_RIP:
+ vcpu->arch.rip = vmcs_readl(GUEST_RIP);
+ break;
+ default:
+ break;
+ }
}
-/*
- * Syncs rsp and rip back into the vmcs. Should be called after possible
- * modification.
- */
-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_decache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
- vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
- vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+ switch (reg) {
+ case VCPU_REGS_RSP:
+ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+ break;
+ case VCPU_REGS_RIP:
+ vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+ break;
+ default:
+ break;
+ }
}
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -2121,11 +2130,11 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
if (vcpu->arch.rmode.active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = irq;
- vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
+ vmx->rmode.irq.rip = kvm_rip_read(vcpu);
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
- vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
+ kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
return;
}
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2270,7 +2279,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
error_code = 0;
- rip = vmcs_readl(GUEST_RIP);
+ rip = kvm_rip_read(vcpu);
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
if (is_page_fault(intr_info)) {
@@ -2366,27 +2375,25 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
- KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
- (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
+ KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
+ (u32)kvm_register_read(vcpu, reg),
+ (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
+ handler);
switch (cr) {
case 0:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
return 1;
case 3:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
return 1;
case 4:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
return 1;
case 8:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
return 1;
@@ -2395,7 +2402,6 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
};
break;
case 2: /* clts */
- vcpu_load_rsp_rip(vcpu);
vmx_fpu_deactivate(vcpu);
vcpu->arch.cr0 &= ~X86_CR0_TS;
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
@@ -2406,21 +2412,17 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
case 1: /*mov from cr*/
switch (cr) {
case 3:
- vcpu_load_rsp_rip(vcpu);
- vcpu->arch.regs[reg] = vcpu->arch.cr3;
- vcpu_put_rsp_rip(vcpu);
+ kvm_register_write(vcpu, reg, vcpu->arch.cr3);
KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
- (u32)vcpu->arch.regs[reg],
- (u32)((u64)vcpu->arch.regs[reg] >> 32),
+ (u32)kvm_register_read(vcpu, reg),
+ (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
handler);
skip_emulated_instruction(vcpu);
return 1;
case 8:
- vcpu_load_rsp_rip(vcpu);
- vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
- vcpu_put_rsp_rip(vcpu);
+ kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
- (u32)vcpu->arch.regs[reg], handler);
+ (u32)kvm_register_read(vcpu, reg), handler);
skip_emulated_instruction(vcpu);
return 1;
}
@@ -2452,7 +2454,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & 7;
reg = (exit_qualification >> 8) & 15;
- vcpu_load_rsp_rip(vcpu);
if (exit_qualification & 16) {
/* mov from dr */
switch (dr) {
@@ -2465,12 +2466,11 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
default:
val = 0;
}
- vcpu->arch.regs[reg] = val;
+ kvm_register_write(vcpu, reg, val);
KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
} else {
/* mov to dr */
}
- vcpu_put_rsp_rip(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
@@ -2715,8 +2715,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vectoring_info = vmx->idt_vectoring_info;
- KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
- (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+ KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
+ (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
/* Access CR3 don't cause VMExit in paging mode, so we need
* to sync with guest real CR3. */
@@ -2916,11 +2916,21 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx)
| vmx->rmode.irq.vector;
}
+static void vmx_flush_regs(struct kvm_vcpu *vcpu)
+{
+ if (__test_and_clear_bit(VCPU_REGS_RSP, &vcpu->arch.regs_dirty))
+ vmx_decache_reg(vcpu, VCPU_REGS_RSP);
+ if (__test_and_clear_bit(VCPU_REGS_RIP, &vcpu->arch.regs_dirty))
+ vmx_decache_reg(vcpu, VCPU_REGS_RIP);
+}
+
static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info;
+ vmx_flush_regs(vcpu);
+
/*
* Loading guest fpu may have cleared host cr0.ts
*/
@@ -3060,6 +3070,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
KVMTRACE_0D(NMI, vcpu, handler);
asm("int $2");
}
+ vcpu->arch.regs_available = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+ vcpu->arch.regs_dirty = 0;
}
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
@@ -3213,8 +3225,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
- .cache_regs = vcpu_load_rsp_rip,
- .decache_regs = vcpu_put_rsp_rip,
+ .cache_reg = vmx_cache_reg,
+ .decache_reg = vmx_decache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 26b051b..970e272 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -19,6 +19,7 @@
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
+#include "kvm_cache_regs.h"
#include <linux/clocksource.h>
#include <linux/kvm.h>
@@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
struct kvm_x86_ops *kvm_x86_ops;
+EXPORT_SYMBOL_GPL(kvm_x86_ops);
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -2028,7 +2030,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
u8 opcodes[4];
- unsigned long rip = vcpu->arch.rip;
+ unsigned long rip = kvm_rip_read(vcpu);
unsigned long rip_linear;
if (!printk_ratelimit())
@@ -2050,6 +2052,22 @@ static struct x86_emulate_ops emulate_ops = {
.cmpxchg_emulated = emulator_cmpxchg_emulated,
};
+static void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RAX);
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RSP);
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RIP);
+}
+
+static void decache_all_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_register_write(vcpu, VCPU_REGS_RAX,
+ vcpu->arch.regs[VCPU_REGS_RAX]);
+ kvm_register_write(vcpu, VCPU_REGS_RSP,
+ vcpu->arch.regs[VCPU_REGS_RSP]);
+ kvm_rip_write(vcpu, vcpu->arch.rip);
+}
+
int emulate_instruction(struct kvm_vcpu *vcpu,
struct kvm_run *run,
unsigned long cr2,
@@ -2060,7 +2078,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
struct decode_cache *c;
vcpu->arch.mmio_fault_cr2 = cr2;
- kvm_x86_ops->cache_regs(vcpu);
+ /*
+ * TODO: fix x86_emulate.c to use guest_read/write_register
+ * instead of direct ->regs accesses, can save a hundred cycles
+ * on Intel for instructions that don't read/change RSP, for
+ * example.
+ */
+ cache_all_regs(vcpu);
vcpu->mmio_is_write = 0;
vcpu->arch.pio.string = 0;
@@ -2141,7 +2165,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
- kvm_x86_ops->decache_regs(vcpu);
+ decache_all_regs(vcpu);
kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
if (vcpu->mmio_is_write) {
@@ -2194,18 +2218,19 @@ int complete_pio(struct kvm_vcpu *vcpu)
struct kvm_pio_request *io = &vcpu->arch.pio;
long delta;
int r;
-
- kvm_x86_ops->cache_regs(vcpu);
+ unsigned long val;
if (!io->string) {
- if (io->in)
- memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
- io->size);
+ if (io->in) {
+ val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ memcpy(&val, vcpu->arch.pio_data, io->size);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ }
} else {
if (io->in) {
r = pio_copy_data(vcpu);
if (r) {
- kvm_x86_ops->cache_regs(vcpu);
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RAX);
return r;
}
}
@@ -2217,19 +2242,24 @@ int complete_pio(struct kvm_vcpu *vcpu)
* The size of the register should really depend on
* current address size.
*/
- vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
+ val = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ val -= delta;
+ kvm_register_write(vcpu, VCPU_REGS_RCX, val);
}
if (io->down)
delta = -delta;
delta *= io->size;
- if (io->in)
- vcpu->arch.regs[VCPU_REGS_RDI] += delta;
- else
- vcpu->arch.regs[VCPU_REGS_RSI] += delta;
+ if (io->in) {
+ val = kvm_register_read(vcpu, VCPU_REGS_RDI);
+ val += delta;
+ kvm_register_write(vcpu, VCPU_REGS_RDI, val);
+ } else {
+ val = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ val += delta;
+ kvm_register_write(vcpu, VCPU_REGS_RSI, val);
+ }
}
- kvm_x86_ops->decache_regs(vcpu);
-
io->count -= io->cur_count;
io->cur_count = 0;
@@ -2282,6 +2312,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned port)
{
struct kvm_io_device *pio_dev;
+ unsigned long val;
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2302,8 +2333,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
handler);
- kvm_x86_ops->cache_regs(vcpu);
- memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+ val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ memcpy(vcpu->arch.pio_data, &val, 4);
kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2488,13 +2519,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int r = 1;
- kvm_x86_ops->cache_regs(vcpu);
-
- nr = vcpu->arch.regs[VCPU_REGS_RAX];
- a0 = vcpu->arch.regs[VCPU_REGS_RBX];
- a1 = vcpu->arch.regs[VCPU_REGS_RCX];
- a2 = vcpu->arch.regs[VCPU_REGS_RDX];
- a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+ nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
@@ -2517,8 +2546,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
ret = -KVM_ENOSYS;
break;
}
- vcpu->arch.regs[VCPU_REGS_RAX] = ret;
- kvm_x86_ops->decache_regs(vcpu);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
++vcpu->stat.hypercalls;
return r;
}
@@ -2528,6 +2556,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
char instruction[3];
int ret = 0;
+ unsigned long rip = kvm_rip_read(vcpu);
/*
@@ -2537,9 +2566,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
*/
kvm_mmu_zap_all(vcpu->kvm);
- kvm_x86_ops->cache_regs(vcpu);
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
+ if (emulator_write_emulated(rip, instruction, 3, vcpu)
!= X86EMUL_CONTINUE)
ret = -EFAULT;
@@ -2669,13 +2697,12 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
u32 function, index;
struct kvm_cpuid_entry2 *e, *best;
- kvm_x86_ops->cache_regs(vcpu);
- function = vcpu->arch.regs[VCPU_REGS_RAX];
- index = vcpu->arch.regs[VCPU_REGS_RCX];
- vcpu->arch.regs[VCPU_REGS_RAX] = 0;
- vcpu->arch.regs[VCPU_REGS_RBX] = 0;
- vcpu->arch.regs[VCPU_REGS_RCX] = 0;
- vcpu->arch.regs[VCPU_REGS_RDX] = 0;
+ function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
best = NULL;
for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
e = &vcpu->arch.cpuid_entries[i];
@@ -2693,12 +2720,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
best = e;
}
if (best) {
- vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
- vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
- vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
- vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
}
- kvm_x86_ops->decache_regs(vcpu);
kvm_x86_ops->skip_emulated_instruction(vcpu);
KVMTRACE_5D(CPUID, vcpu, function,
(u32)vcpu->arch.regs[VCPU_REGS_RAX],
@@ -2884,8 +2910,8 @@ again:
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
- kvm_x86_ops->cache_regs(vcpu);
- profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
+ unsigned long rip = kvm_rip_read(vcpu);
+ profile_hit(KVM_PROFILING, (void *)rip);
}
if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
@@ -2968,11 +2994,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
}
#endif
- if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
- kvm_x86_ops->cache_regs(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
- kvm_x86_ops->decache_regs(vcpu);
- }
+ if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
+ kvm_register_write(vcpu, VCPU_REGS_RAX,
+ kvm_run->hypercall.ret);
r = __vcpu_run(vcpu, kvm_run);
@@ -2988,28 +3012,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
-
- regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
- regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
- regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
- regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
- regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
- regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
- regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
+ regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
+ regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
- regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
- regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
- regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
- regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
- regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
- regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
- regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
- regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+ regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
+ regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
+ regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
+ regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
+ regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
+ regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
+ regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
+ regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif
- regs->rip = vcpu->arch.rip;
+ regs->rip = kvm_rip_read(vcpu);
regs->rflags = kvm_x86_ops->get_rflags(vcpu);
/*
@@ -3027,29 +3049,29 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
- vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
- vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
- vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
- vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
- vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
- vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
- vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
+ kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
+ kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
- vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
- vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
- vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
- vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
- vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
- vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
- vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
- vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
+ kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
+ kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
+ kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
+ kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
+ kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
+ kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
+ kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
+ kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+
#endif
- vcpu->arch.rip = regs->rip;
+ kvm_rip_write(vcpu, regs->rip);
kvm_x86_ops->set_rflags(vcpu, regs->rflags);
- kvm_x86_ops->decache_regs(vcpu);
vcpu->arch.exception.pending = false;
@@ -3323,17 +3345,16 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
struct tss_segment_32 *tss)
{
tss->cr3 = vcpu->arch.cr3;
- tss->eip = vcpu->arch.rip;
+ tss->eip = kvm_rip_read(vcpu);
tss->eflags = kvm_x86_ops->get_rflags(vcpu);
- tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
- tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
- tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
- tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
- tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
- tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
- tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
-
+ tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+ tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
@@ -3349,17 +3370,17 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
{
kvm_set_cr3(vcpu, tss->cr3);
- vcpu->arch.rip = tss->eip;
+ kvm_rip_write(vcpu, tss->eip);
kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
- vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
- vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
- vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
- vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
- vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
- vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
- vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
- vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
+ kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
+ kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
+ kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
return 1;
@@ -3387,16 +3408,16 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
static void save_state_to_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
- tss->ip = vcpu->arch.rip;
+ tss->ip = kvm_rip_read(vcpu);
tss->flag = kvm_x86_ops->get_rflags(vcpu);
- tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
- tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
- tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
- tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
- tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
- tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
- tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
- tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+ tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+ tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
@@ -3409,16 +3430,16 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu,
static int load_state_from_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
- vcpu->arch.rip = tss->ip;
+ kvm_rip_write(vcpu, tss->ip);
kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
- vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
- vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
- vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
- vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
- vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
- vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
- vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
- vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
+ kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
+ kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
+ kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
return 1;
@@ -3526,7 +3547,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
}
kvm_x86_ops->skip_emulated_instruction(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
if (nseg_desc.type & 8)
ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
@@ -3551,7 +3571,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
tr_seg.type = 11;
kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
- kvm_x86_ops->decache_regs(vcpu);
return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 38926b7..64fe207 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -26,6 +26,7 @@
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
@@ -806,7 +807,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
/* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache));
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
switch (mode) {
@@ -1245,7 +1246,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
if (c->rep_prefix && (c->d & String)) {
/* All REP prefixes have the same first termination condition */
if (c->regs[VCPU_REGS_RCX] == 0) {
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
/* The second termination condition only applies for REPE
@@ -1259,17 +1260,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
(c->b == 0xae) || (c->b == 0xaf)) {
if ((c->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0)) {
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
if ((c->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
}
c->regs[VCPU_REGS_RCX]--;
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
}
if (c->src.type == OP_MEM) {
@@ -1750,7 +1751,7 @@ writeback:
/* Commit shadow register state. */
memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
done:
if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1775,7 +1776,7 @@ twobyte_insn:
goto done;
/* Let the processor re-execute the fixed hypercall */
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
@@ -1871,7 +1872,7 @@ twobyte_insn:
rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
if (rc) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
}
rc = X86EMUL_CONTINUE;
c->dst.type = OP_NONE;
@@ -1881,7 +1882,7 @@ twobyte_insn:
rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
if (rc) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
} else {
c->regs[VCPU_REGS_RAX] = (u32)msr_data;
c->regs[VCPU_REGS_RDX] = msr_data >> 32;
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 851184d..515add9 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -87,7 +87,7 @@ extern struct list_head vm_list;
struct kvm_vcpu;
struct kvm;
-enum {
+enum kvm_reg {
VCPU_REGS_RAX = 0,
VCPU_REGS_RCX = 1,
VCPU_REGS_RDX = 2,
@@ -109,6 +109,8 @@ enum {
NR_VCPU_REGS
};
+#define VCPU_REGS_RIP NR_VCPU_REGS
+
enum {
VCPU_SREG_ES,
VCPU_SREG_CS,
@@ -217,8 +219,14 @@ struct kvm_vcpu_arch {
int interrupt_window_open;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
- unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
- unsigned long rip; /* needs vcpu_load_rsp_rip() */
+ /*
+ * rip and regs accesses must go through
+ * kvm_{register,rip}_{read,write} functions.
+ */
+ unsigned long regs[NR_VCPU_REGS];
+ unsigned long rip;
+ u32 regs_available;
+ u32 regs_dirty;
unsigned long cr0;
unsigned long cr2;
@@ -410,8 +418,8 @@ struct kvm_x86_ops {
unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
int *exception);
- void (*cache_regs)(struct kvm_vcpu *vcpu);
- void (*decache_regs)(struct kvm_vcpu *vcpu);
+ void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ void (*decache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: RFC: cache_regs in kvm_emulate_pio
2008-06-26 22:15 ` Marcelo Tosatti
@ 2008-06-27 2:28 ` Marcelo Tosatti
0 siblings, 0 replies; 12+ messages in thread
From: Marcelo Tosatti @ 2008-06-27 2:28 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
On Thu, Jun 26, 2008 at 07:15:45PM -0300, Marcelo Tosatti wrote:
> On Thu, Jun 26, 2008 at 12:18:07PM +0300, Avi Kivity wrote:
> > A new header file is excessive. Also, these are global names, so please
> > prefix with kvm_.
>
> The reason for a separate header is that these accessors need both
> kvm_vcpu (linux/kvm_host.h) and kvm_vcpu_arch (asm/kvm_host.h).
>
> I couldn't think of a better location to put them. Ideas?
>
> >> @@ -241,7 +242,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
> >> svm->vmcb->save.rip,
> >> svm->next_rip);
> >> - vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
> >> + svm->vmcb->save.rip = svm->next_rip;
> >> + guest_register_write(vcpu, VCPU_REGS_RIP, svm->vmcb->save.rip);
> >> svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
> >>
> >
> > No need to write into save.rip, is there?
>
> I'm not sure why it was there in the first place. Perhaps there's code
> which reads svm->vmcb->save.rip directly as the current RIP after this
> point?
>
> Also "kvm_guest_register_read" is too long, so I dropped "guest".
There was a missing conversion in vmx_vcpu_reset(). And I haven't tested
AMD.
--- /dev/null 2008-06-26 10:56:31.025001212 -0300
+++ b/arch/x86/kvm/kvm_cache_regs.h 2008-06-26 23:21:28.000000000 -0300
@@ -0,0 +1,36 @@
+#ifndef ASM_KVM_CACHE_REGS_H
+#define ASM_KVM_CACHE_REGS_H
+
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ if (!__test_and_set_bit(reg, &vcpu->arch.regs_available))
+ kvm_x86_ops->cache_reg(vcpu, reg);
+
+ return vcpu->arch.regs[reg];
+}
+
+static inline void kvm_register_write(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg,
+ unsigned long val)
+{
+ vcpu->arch.regs[reg] = val;
+ __set_bit(reg, &vcpu->arch.regs_dirty);
+}
+
+static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
+{
+ if (!__test_and_set_bit(VCPU_REGS_RIP, &vcpu->arch.regs_available))
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RIP);
+
+ return vcpu->arch.rip;
+}
+
+static inline void kvm_rip_write(struct kvm_vcpu *vcpu,
+ unsigned long val)
+{
+ vcpu->arch.rip = val;
+ __set_bit(VCPU_REGS_RIP, &vcpu->arch.regs_dirty);
+}
+
+#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 73f43de..9fde0ac 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -32,6 +32,7 @@
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
+#include "kvm_cache_regs.h"
#include "irq.h"
#define PRId64 "d"
@@ -558,8 +559,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
struct kvm_run *run = vcpu->run;
set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
- kvm_x86_ops->cache_regs(vcpu);
- run->tpr_access.rip = vcpu->arch.rip;
+ run->tpr_access.rip = kvm_rip_read(vcpu);
run->tpr_access.is_write = write;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 238e8f3..532a393 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -18,6 +18,7 @@
#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
+#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -241,7 +242,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm->vmcb->save.rip,
svm->next_rip);
- vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
+ svm->vmcb->save.rip = svm->next_rip;
+ kvm_rip_write(vcpu, svm->vmcb->save.rip);
svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
vcpu->arch.interrupt_window_open = 1;
@@ -709,21 +711,42 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
rdtscll(vcpu->arch.host_tsc);
}
-static void svm_cache_regs(struct kvm_vcpu *vcpu)
+static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
struct vcpu_svm *svm = to_svm(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
- vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
- vcpu->arch.rip = svm->vmcb->save.rip;
+ switch (reg) {
+ case VCPU_REGS_RAX:
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+ break;
+ case VCPU_REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+ break;
+ case VCPU_REGS_RIP:
+ vcpu->arch.rip = svm->vmcb->save.rip;
+ break;
+ default:
+ break;
+ }
}
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
+static void svm_decache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.rip;
+
+ switch (reg) {
+ case VCPU_REGS_RAX:
+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+ break;
+ case VCPU_REGS_RSP:
+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+ break;
+ case VCPU_REGS_RIP:
+ svm->vmcb->save.rip = vcpu->arch.rip;
+ break;
+ default:
+ break;
+ }
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -1688,6 +1711,21 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
+static void svm_flush_regs(struct kvm_vcpu *vcpu)
+{
+ svm_decache_reg(vcpu, VCPU_REGS_RSP);
+ svm_decache_reg(vcpu, VCPU_REGS_RIP);
+ svm_decache_reg(vcpu, VCPU_REGS_RAX);
+}
+
+static void svm_cache_regs(struct kvm_vcpu *vcpu)
+{
+ svm_cache_reg(vcpu, VCPU_REGS_RSP);
+ svm_cache_reg(vcpu, VCPU_REGS_RIP);
+ svm_cache_reg(vcpu, VCPU_REGS_RAX);
+ vcpu->arch.regs_available = ~0U;
+}
+
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1695,6 +1733,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
u16 gs_selector;
u16 ldt_selector;
+ svm_flush_regs(vcpu);
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
@@ -1849,6 +1888,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
+ svm_cache_regs(vcpu);
+ vcpu->arch.regs_dirty = 0;
}
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -1949,8 +1990,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_gdt = svm_set_gdt,
.get_dr = svm_get_dr,
.set_dr = svm_set_dr,
- .cache_regs = svm_cache_regs,
- .decache_regs = svm_decache_regs,
+ .cache_reg = svm_cache_reg,
+ .decache_reg = svm_decache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e4278d..7eebe02 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"
#include <asm/io.h>
#include <asm/desc.h>
@@ -707,9 +708,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
unsigned long rip;
u32 interruptibility;
- rip = vmcs_readl(GUEST_RIP);
+ rip = kvm_rip_read(vcpu);
rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- vmcs_writel(GUEST_RIP, rip);
+ kvm_rip_write(vcpu, rip);
/*
* We emulated an instruction, so temporary interrupt blocking
@@ -931,24 +932,32 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
return ret;
}
-/*
- * Sync the rsp and rip registers into the vcpu structure. This allows
- * registers to be accessed by indexing vcpu->arch.regs.
- */
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
- vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
- vcpu->arch.rip = vmcs_readl(GUEST_RIP);
+ switch (reg) {
+ case VCPU_REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+ break;
+ case VCPU_REGS_RIP:
+ vcpu->arch.rip = vmcs_readl(GUEST_RIP);
+ break;
+ default:
+ break;
+ }
}
-/*
- * Syncs rsp and rip back into the vmcs. Should be called after possible
- * modification.
- */
-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
+static void vmx_decache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
- vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
- vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+ switch (reg) {
+ case VCPU_REGS_RSP:
+ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+ break;
+ case VCPU_REGS_RIP:
+ vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+ break;
+ default:
+ break;
+ }
}
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -2054,10 +2063,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_RFLAGS, 0x02);
if (vmx->vcpu.vcpu_id == 0)
- vmcs_writel(GUEST_RIP, 0xfff0);
+ kvm_rip_write(vcpu, 0xfff0);
else
- vmcs_writel(GUEST_RIP, 0);
- vmcs_writel(GUEST_RSP, 0);
+ kvm_rip_write(vcpu, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
vmcs_writel(GUEST_DR7, 0x400);
@@ -2121,11 +2130,11 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
if (vcpu->arch.rmode.active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = irq;
- vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
+ vmx->rmode.irq.rip = kvm_rip_read(vcpu);
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
- vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
+ kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
return;
}
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2270,7 +2279,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
error_code = 0;
- rip = vmcs_readl(GUEST_RIP);
+ rip = kvm_rip_read(vcpu);
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
if (is_page_fault(intr_info)) {
@@ -2366,27 +2375,25 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
- KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
- (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
+ KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
+ (u32)kvm_register_read(vcpu, reg),
+ (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
+ handler);
switch (cr) {
case 0:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
return 1;
case 3:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
return 1;
case 4:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
return 1;
case 8:
- vcpu_load_rsp_rip(vcpu);
- kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
skip_emulated_instruction(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
return 1;
@@ -2395,7 +2402,6 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
};
break;
case 2: /* clts */
- vcpu_load_rsp_rip(vcpu);
vmx_fpu_deactivate(vcpu);
vcpu->arch.cr0 &= ~X86_CR0_TS;
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
@@ -2406,21 +2412,17 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
case 1: /*mov from cr*/
switch (cr) {
case 3:
- vcpu_load_rsp_rip(vcpu);
- vcpu->arch.regs[reg] = vcpu->arch.cr3;
- vcpu_put_rsp_rip(vcpu);
+ kvm_register_write(vcpu, reg, vcpu->arch.cr3);
KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
- (u32)vcpu->arch.regs[reg],
- (u32)((u64)vcpu->arch.regs[reg] >> 32),
+ (u32)kvm_register_read(vcpu, reg),
+ (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
handler);
skip_emulated_instruction(vcpu);
return 1;
case 8:
- vcpu_load_rsp_rip(vcpu);
- vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
- vcpu_put_rsp_rip(vcpu);
+ kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
- (u32)vcpu->arch.regs[reg], handler);
+ (u32)kvm_register_read(vcpu, reg), handler);
skip_emulated_instruction(vcpu);
return 1;
}
@@ -2452,7 +2454,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & 7;
reg = (exit_qualification >> 8) & 15;
- vcpu_load_rsp_rip(vcpu);
if (exit_qualification & 16) {
/* mov from dr */
switch (dr) {
@@ -2465,12 +2466,11 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
default:
val = 0;
}
- vcpu->arch.regs[reg] = val;
+ kvm_register_write(vcpu, reg, val);
KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
} else {
/* mov to dr */
}
- vcpu_put_rsp_rip(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
@@ -2715,8 +2715,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vectoring_info = vmx->idt_vectoring_info;
- KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
- (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+ KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
+ (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
/* Access CR3 don't cause VMExit in paging mode, so we need
* to sync with guest real CR3. */
@@ -2916,11 +2916,21 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx)
| vmx->rmode.irq.vector;
}
+static void vmx_flush_regs(struct kvm_vcpu *vcpu)
+{
+ if (__test_and_clear_bit(VCPU_REGS_RSP, &vcpu->arch.regs_dirty))
+ vmx_decache_reg(vcpu, VCPU_REGS_RSP);
+ if (__test_and_clear_bit(VCPU_REGS_RIP, &vcpu->arch.regs_dirty))
+ vmx_decache_reg(vcpu, VCPU_REGS_RIP);
+}
+
static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info;
+ vmx_flush_regs(vcpu);
+
/*
* Loading guest fpu may have cleared host cr0.ts
*/
@@ -3060,6 +3070,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
KVMTRACE_0D(NMI, vcpu, handler);
asm("int $2");
}
+ vcpu->arch.regs_available = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+ vcpu->arch.regs_dirty = 0;
}
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
@@ -3213,8 +3225,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
- .cache_regs = vcpu_load_rsp_rip,
- .decache_regs = vcpu_put_rsp_rip,
+ .cache_reg = vmx_cache_reg,
+ .decache_reg = vmx_decache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 26b051b..970e272 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -19,6 +19,7 @@
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
+#include "kvm_cache_regs.h"
#include <linux/clocksource.h>
#include <linux/kvm.h>
@@ -61,6 +62,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
struct kvm_x86_ops *kvm_x86_ops;
+EXPORT_SYMBOL_GPL(kvm_x86_ops);
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -2028,7 +2030,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
u8 opcodes[4];
- unsigned long rip = vcpu->arch.rip;
+ unsigned long rip = kvm_rip_read(vcpu);
unsigned long rip_linear;
if (!printk_ratelimit())
@@ -2050,6 +2052,22 @@ static struct x86_emulate_ops emulate_ops = {
.cmpxchg_emulated = emulator_cmpxchg_emulated,
};
+static void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RAX);
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RSP);
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RIP);
+}
+
+static void decache_all_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_register_write(vcpu, VCPU_REGS_RAX,
+ vcpu->arch.regs[VCPU_REGS_RAX]);
+ kvm_register_write(vcpu, VCPU_REGS_RSP,
+ vcpu->arch.regs[VCPU_REGS_RSP]);
+ kvm_rip_write(vcpu, vcpu->arch.rip);
+}
+
int emulate_instruction(struct kvm_vcpu *vcpu,
struct kvm_run *run,
unsigned long cr2,
@@ -2060,7 +2078,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
struct decode_cache *c;
vcpu->arch.mmio_fault_cr2 = cr2;
- kvm_x86_ops->cache_regs(vcpu);
+ /*
+ * TODO: fix x86_emulate.c to use guest_read/write_register
+ * instead of direct ->regs accesses; that can save a hundred
+ * cycles on Intel for instructions that don't read/change
+ * RSP, for example.
+ */
+ cache_all_regs(vcpu);
vcpu->mmio_is_write = 0;
vcpu->arch.pio.string = 0;
@@ -2141,7 +2165,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
- kvm_x86_ops->decache_regs(vcpu);
+ decache_all_regs(vcpu);
kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
if (vcpu->mmio_is_write) {
@@ -2194,18 +2218,19 @@ int complete_pio(struct kvm_vcpu *vcpu)
struct kvm_pio_request *io = &vcpu->arch.pio;
long delta;
int r;
-
- kvm_x86_ops->cache_regs(vcpu);
+ unsigned long val;
if (!io->string) {
- if (io->in)
- memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
- io->size);
+ if (io->in) {
+ val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ memcpy(&val, vcpu->arch.pio_data, io->size);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ }
} else {
if (io->in) {
r = pio_copy_data(vcpu);
if (r) {
- kvm_x86_ops->cache_regs(vcpu);
+ kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RAX);
return r;
}
}
@@ -2217,19 +2242,24 @@ int complete_pio(struct kvm_vcpu *vcpu)
* The size of the register should really depend on
* current address size.
*/
- vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
+ val = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ val -= delta;
+ kvm_register_write(vcpu, VCPU_REGS_RCX, val);
}
if (io->down)
delta = -delta;
delta *= io->size;
- if (io->in)
- vcpu->arch.regs[VCPU_REGS_RDI] += delta;
- else
- vcpu->arch.regs[VCPU_REGS_RSI] += delta;
+ if (io->in) {
+ val = kvm_register_read(vcpu, VCPU_REGS_RDI);
+ val += delta;
+ kvm_register_write(vcpu, VCPU_REGS_RDI, val);
+ } else {
+ val = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ val += delta;
+ kvm_register_write(vcpu, VCPU_REGS_RSI, val);
+ }
}
- kvm_x86_ops->decache_regs(vcpu);
-
io->count -= io->cur_count;
io->cur_count = 0;
@@ -2282,6 +2312,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned port)
{
struct kvm_io_device *pio_dev;
+ unsigned long val;
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2302,8 +2333,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
handler);
- kvm_x86_ops->cache_regs(vcpu);
- memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+ val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ memcpy(vcpu->arch.pio_data, &val, 4);
kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2488,13 +2519,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int r = 1;
- kvm_x86_ops->cache_regs(vcpu);
-
- nr = vcpu->arch.regs[VCPU_REGS_RAX];
- a0 = vcpu->arch.regs[VCPU_REGS_RBX];
- a1 = vcpu->arch.regs[VCPU_REGS_RCX];
- a2 = vcpu->arch.regs[VCPU_REGS_RDX];
- a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+ nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
@@ -2517,8 +2546,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
ret = -KVM_ENOSYS;
break;
}
- vcpu->arch.regs[VCPU_REGS_RAX] = ret;
- kvm_x86_ops->decache_regs(vcpu);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
++vcpu->stat.hypercalls;
return r;
}
@@ -2528,6 +2556,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
char instruction[3];
int ret = 0;
+ unsigned long rip = kvm_rip_read(vcpu);
/*
@@ -2537,9 +2566,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
*/
kvm_mmu_zap_all(vcpu->kvm);
- kvm_x86_ops->cache_regs(vcpu);
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
+ if (emulator_write_emulated(rip, instruction, 3, vcpu)
!= X86EMUL_CONTINUE)
ret = -EFAULT;
@@ -2669,13 +2697,12 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
u32 function, index;
struct kvm_cpuid_entry2 *e, *best;
- kvm_x86_ops->cache_regs(vcpu);
- function = vcpu->arch.regs[VCPU_REGS_RAX];
- index = vcpu->arch.regs[VCPU_REGS_RCX];
- vcpu->arch.regs[VCPU_REGS_RAX] = 0;
- vcpu->arch.regs[VCPU_REGS_RBX] = 0;
- vcpu->arch.regs[VCPU_REGS_RCX] = 0;
- vcpu->arch.regs[VCPU_REGS_RDX] = 0;
+ function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
best = NULL;
for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
e = &vcpu->arch.cpuid_entries[i];
@@ -2693,12 +2720,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
best = e;
}
if (best) {
- vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
- vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
- vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
- vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
}
- kvm_x86_ops->decache_regs(vcpu);
kvm_x86_ops->skip_emulated_instruction(vcpu);
KVMTRACE_5D(CPUID, vcpu, function,
(u32)vcpu->arch.regs[VCPU_REGS_RAX],
@@ -2884,8 +2910,8 @@ again:
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
- kvm_x86_ops->cache_regs(vcpu);
- profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
+ unsigned long rip = kvm_rip_read(vcpu);
+ profile_hit(KVM_PROFILING, (void *)rip);
}
if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
@@ -2968,11 +2994,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
}
#endif
- if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
- kvm_x86_ops->cache_regs(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
- kvm_x86_ops->decache_regs(vcpu);
- }
+ if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
+ kvm_register_write(vcpu, VCPU_REGS_RAX,
+ kvm_run->hypercall.ret);
r = __vcpu_run(vcpu, kvm_run);
@@ -2988,28 +3012,26 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
-
- regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
- regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
- regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
- regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
- regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
- regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
- regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
+ regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
+ regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
- regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
- regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
- regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
- regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
- regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
- regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
- regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
- regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+ regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
+ regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
+ regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
+ regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
+ regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
+ regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
+ regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
+ regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif
- regs->rip = vcpu->arch.rip;
+ regs->rip = kvm_rip_read(vcpu);
regs->rflags = kvm_x86_ops->get_rflags(vcpu);
/*
@@ -3027,29 +3049,29 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu_load(vcpu);
- vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
- vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
- vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
- vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
- vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
- vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
- vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
- vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
+ kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
+ kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
- vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
- vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
- vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
- vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
- vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
- vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
- vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
- vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
+ kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
+ kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
+ kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
+ kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
+ kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
+ kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
+ kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
+ kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+
#endif
- vcpu->arch.rip = regs->rip;
+ kvm_rip_write(vcpu, regs->rip);
kvm_x86_ops->set_rflags(vcpu, regs->rflags);
- kvm_x86_ops->decache_regs(vcpu);
vcpu->arch.exception.pending = false;
@@ -3323,17 +3345,16 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
struct tss_segment_32 *tss)
{
tss->cr3 = vcpu->arch.cr3;
- tss->eip = vcpu->arch.rip;
+ tss->eip = kvm_rip_read(vcpu);
tss->eflags = kvm_x86_ops->get_rflags(vcpu);
- tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
- tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
- tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
- tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
- tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
- tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
- tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
-
+ tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+ tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
@@ -3349,17 +3370,17 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
{
kvm_set_cr3(vcpu, tss->cr3);
- vcpu->arch.rip = tss->eip;
+ kvm_rip_write(vcpu, tss->eip);
kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
- vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
- vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
- vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
- vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
- vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
- vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
- vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
- vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
+ kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
+ kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
+ kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
return 1;
@@ -3387,16 +3408,16 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
static void save_state_to_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
- tss->ip = vcpu->arch.rip;
+ tss->ip = kvm_rip_read(vcpu);
tss->flag = kvm_x86_ops->get_rflags(vcpu);
- tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
- tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
- tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
- tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
- tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
- tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
- tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
- tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+ tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+ tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+ tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
@@ -3409,16 +3430,16 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu,
static int load_state_from_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
- vcpu->arch.rip = tss->ip;
+ kvm_rip_write(vcpu, tss->ip);
kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
- vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
- vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
- vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
- vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
- vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
- vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
- vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
- vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
+ kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
+ kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
+ kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
+ kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
return 1;
@@ -3526,7 +3547,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
}
kvm_x86_ops->skip_emulated_instruction(vcpu);
- kvm_x86_ops->cache_regs(vcpu);
if (nseg_desc.type & 8)
ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
@@ -3551,7 +3571,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
tr_seg.type = 11;
kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
- kvm_x86_ops->decache_regs(vcpu);
return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 38926b7..64fe207 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -26,6 +26,7 @@
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
@@ -806,7 +807,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
/* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache));
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
switch (mode) {
@@ -1245,7 +1246,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
if (c->rep_prefix && (c->d & String)) {
/* All REP prefixes have the same first termination condition */
if (c->regs[VCPU_REGS_RCX] == 0) {
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
/* The second termination condition only applies for REPE
@@ -1259,17 +1260,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
(c->b == 0xae) || (c->b == 0xaf)) {
if ((c->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0)) {
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
if ((c->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
}
c->regs[VCPU_REGS_RCX]--;
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
}
if (c->src.type == OP_MEM) {
@@ -1750,7 +1751,7 @@ writeback:
/* Commit shadow register state. */
memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
- ctxt->vcpu->arch.rip = c->eip;
+ kvm_rip_write(ctxt->vcpu, c->eip);
done:
if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1775,7 +1776,7 @@ twobyte_insn:
goto done;
/* Let the processor re-execute the fixed hypercall */
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
@@ -1871,7 +1872,7 @@ twobyte_insn:
rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
if (rc) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
}
rc = X86EMUL_CONTINUE;
c->dst.type = OP_NONE;
@@ -1881,7 +1882,7 @@ twobyte_insn:
rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
if (rc) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = ctxt->vcpu->arch.rip;
+ c->eip = kvm_rip_read(ctxt->vcpu);
} else {
c->regs[VCPU_REGS_RAX] = (u32)msr_data;
c->regs[VCPU_REGS_RDX] = msr_data >> 32;
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 851184d..515add9 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -87,7 +87,7 @@ extern struct list_head vm_list;
struct kvm_vcpu;
struct kvm;
-enum {
+enum kvm_reg {
VCPU_REGS_RAX = 0,
VCPU_REGS_RCX = 1,
VCPU_REGS_RDX = 2,
@@ -109,6 +109,8 @@ enum {
NR_VCPU_REGS
};
+#define VCPU_REGS_RIP NR_VCPU_REGS
+
enum {
VCPU_SREG_ES,
VCPU_SREG_CS,
@@ -217,8 +219,14 @@ struct kvm_vcpu_arch {
int interrupt_window_open;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
- unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
- unsigned long rip; /* needs vcpu_load_rsp_rip() */
+ /*
+ * rip and regs accesses must go through
+ * kvm_{register,rip}_{read,write} functions.
+ */
+ unsigned long regs[NR_VCPU_REGS];
+ unsigned long rip;
+ u32 regs_available;
+ u32 regs_dirty;
unsigned long cr0;
unsigned long cr2;
@@ -410,8 +418,8 @@ struct kvm_x86_ops {
unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
int *exception);
- void (*cache_regs)(struct kvm_vcpu *vcpu);
- void (*decache_regs)(struct kvm_vcpu *vcpu);
+ void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ void (*decache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
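For reference, the kvm_cache_regs.h accessors the callers above rely on are not
part of these hunks. A minimal sketch of what they could look like, assuming the
regs_available/regs_dirty bitmaps added to kvm_vcpu_arch in this patch and the
per-register ->cache_reg/->decache_reg ops (the bodies below are illustrative,
not the actual header contents):

	/* kvm_cache_regs.h -- sketch only */
	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
						      enum kvm_reg reg)
	{
		/* Lazily pull the register from the VMCS/VMCB on first use. */
		if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_available))
			kvm_x86_ops->cache_reg(vcpu, reg);
		return vcpu->arch.regs[reg];
	}

	static inline void kvm_register_write(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg,
					      unsigned long val)
	{
		vcpu->arch.regs[reg] = val;
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_available);
		/* Mark dirty so vmx_flush_regs()/->decache_reg() writes it back. */
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	}

	static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
	{
		/* rip lives outside regs[], but shares the availability bit. */
		if (!test_bit(VCPU_REGS_RIP,
			      (unsigned long *)&vcpu->arch.regs_available))
			kvm_x86_ops->cache_reg(vcpu, VCPU_REGS_RIP);
		return vcpu->arch.rip;
	}

	static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
	{
		vcpu->arch.rip = val;
		__set_bit(VCPU_REGS_RIP,
			  (unsigned long *)&vcpu->arch.regs_available);
		__set_bit(VCPU_REGS_RIP,
			  (unsigned long *)&vcpu->arch.regs_dirty);
	}

With accessors along these lines, only the registers a handler actually reads
are fetched from the VMCS and only the ones it writes are flushed back before
the next vmentry, which is where the cycle savings in kvm_emulate_pio,
kvm_emulate_hypercall and friends come from.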