* [PATCH 0/8] Nested SVM unit tests
@ 2010-07-28 10:18 Avi Kivity
2010-07-28 10:18 ` [PATCH 1/8] test: move ARRAY_SIZE() to libcflat.h Avi Kivity
` (9 more replies)
0 siblings, 10 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
The following patchset adds a framework for nested SVM unit testing. As
you may guess, future fixes and enhancements to nested SVM will require
matching tests.
Currently, our nested SVM implementation has a 66.67% success rate using
these tests.
Avi Kivity (8):
test: move ARRAY_SIZE() to libcflat.h
test: move memset() to libcflat
test: add type bool
test: add processor register access functions
test: make use of new processor.h header
test: add svm definitions header
test: add msr definitions header
test: add svm tests
kvm/test/config-x86-common.mak | 2 +
kvm/test/config-x86_64.mak | 1 +
kvm/test/lib/libcflat.h | 9 +
kvm/test/lib/string.c | 11 +
kvm/test/lib/x86/msr.h | 406 ++++++++++++++++++++++++++++++++++++++++
kvm/test/lib/x86/processor.h | 246 ++++++++++++++++++++++++
kvm/test/x86/access.c | 8 -
kvm/test/x86/apic.c | 8 -
kvm/test/x86/emulator.c | 10 +-
kvm/test/x86/idt.c | 8 -
kvm/test/x86/svm.c | 180 ++++++++++++++++++
kvm/test/x86/svm.h | 328 ++++++++++++++++++++++++++++++++
kvm/test/x86/vm.c | 30 +---
kvm/test/x86/vm.h | 43 +----
kvm/test/x86/vmexit.c | 2 -
15 files changed, 1197 insertions(+), 95 deletions(-)
create mode 100644 kvm/test/lib/x86/msr.h
create mode 100644 kvm/test/lib/x86/processor.h
create mode 100644 kvm/test/x86/svm.c
create mode 100644 kvm/test/x86/svm.h
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH 1/8] test: move ARRAY_SIZE() to libcflat.h
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 10:18 ` [PATCH 2/8] test: move memset() to libcflat Avi Kivity
` (8 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/lib/libcflat.h | 2 ++
kvm/test/x86/vmexit.c | 2 --
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/kvm/test/lib/libcflat.h b/kvm/test/lib/libcflat.h
index 1da4013..7274fed 100644
--- a/kvm/test/lib/libcflat.h
+++ b/kvm/test/lib/libcflat.h
@@ -39,4 +39,6 @@ extern int vsnprintf(char *buf, int size, const char *fmt, va_list va);
extern void puts(const char *s);
+#define ARRAY_SIZE(_a) (sizeof(_a)/sizeof((_a)[0]))
+
#endif
diff --git a/kvm/test/x86/vmexit.c b/kvm/test/x86/vmexit.c
index 731316b..707d5c6 100644
--- a/kvm/test/x86/vmexit.c
+++ b/kvm/test/x86/vmexit.c
@@ -167,8 +167,6 @@ static void do_test(struct test *test)
printf("%s %d\n", test->name, (int)((t2 - t1) / iterations));
}
-#define ARRAY_SIZE(_x) (sizeof(_x) / sizeof((_x)[0]))
-
static void enable_nx(void *junk)
{
wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX_MASK);
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 2/8] test: move memset() to libcflat
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
2010-07-28 10:18 ` [PATCH 1/8] test: move ARRAY_SIZE() to libcflat.h Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 10:18 ` [PATCH 3/8] test: add type bool Avi Kivity
` (7 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/lib/libcflat.h | 3 +++
kvm/test/lib/string.c | 11 +++++++++++
kvm/test/x86/access.c | 8 --------
kvm/test/x86/idt.c | 8 --------
kvm/test/x86/vm.c | 10 ----------
5 files changed, 14 insertions(+), 26 deletions(-)
diff --git a/kvm/test/lib/libcflat.h b/kvm/test/lib/libcflat.h
index 7274fed..2e2a8bf 100644
--- a/kvm/test/lib/libcflat.h
+++ b/kvm/test/lib/libcflat.h
@@ -27,6 +27,7 @@ typedef unsigned short u16;
typedef unsigned u32;
typedef unsigned long ulong;
typedef unsigned long long u64;
+typedef unsigned long size_t;
extern void exit(int code);
extern void panic(char *fmt, ...);
@@ -39,6 +40,8 @@ extern int vsnprintf(char *buf, int size, const char *fmt, va_list va);
extern void puts(const char *s);
+extern void *memset(void *s, int c, size_t n);
+
#define ARRAY_SIZE(_a) (sizeof(_a)/sizeof((_a)[0]))
#endif
diff --git a/kvm/test/lib/string.c b/kvm/test/lib/string.c
index 42be946..acac3c0 100644
--- a/kvm/test/lib/string.c
+++ b/kvm/test/lib/string.c
@@ -19,3 +19,14 @@ char *strcat(char *dest, const char *src)
;
return dest;
}
+
+void *memset(void *s, int c, size_t n)
+{
+ size_t i;
+ char *a = s;
+
+ for (i = 0; i < n; ++i)
+ a[i] = c;
+
+ return s;
+}
diff --git a/kvm/test/x86/access.c b/kvm/test/x86/access.c
index 3338fbc..7e6ffb0 100644
--- a/kvm/test/x86/access.c
+++ b/kvm/test/x86/access.c
@@ -160,14 +160,6 @@ void lidt(idt_entry_t *idt, int nentries)
asm volatile ("lidt %0" : : "m"(dt));
}
-void memset(void *a, unsigned char v, int n)
-{
- unsigned char *x = a;
-
- while (n--)
- *x++ = v;
-}
-
unsigned short read_cs()
{
unsigned short r;
diff --git a/kvm/test/x86/idt.c b/kvm/test/x86/idt.c
index 999b3f0..590839f 100644
--- a/kvm/test/x86/idt.c
+++ b/kvm/test/x86/idt.c
@@ -39,14 +39,6 @@ unsigned short read_cs()
return r;
}
-void memset(void *a, unsigned char v, int n)
-{
- unsigned char *x = a;
-
- while (n--)
- *x++ = v;
-}
-
void set_idt_entry(idt_entry_t *e, void *addr, int dpl)
{
memset(e, 0, sizeof *e);
diff --git a/kvm/test/x86/vm.c b/kvm/test/x86/vm.c
index 0596d9c..c8f1553 100644
--- a/kvm/test/x86/vm.c
+++ b/kvm/test/x86/vm.c
@@ -14,16 +14,6 @@
static void *free = 0;
static void *vfree_top = 0;
-void *memset(void *data, int c, unsigned long len)
-{
- char *s = data;
-
- while (len--)
- *s++ = c;
-
- return data;
-}
-
static void free_memory(void *mem, unsigned long size)
{
while (size >= PAGE_SIZE) {
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 3/8] test: add type bool
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
2010-07-28 10:18 ` [PATCH 1/8] test: move ARRAY_SIZE() to libcflat.h Avi Kivity
2010-07-28 10:18 ` [PATCH 2/8] test: move memset() to libcflat Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 10:18 ` [PATCH 4/8] test: add processor register access functions Avi Kivity
` (6 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/lib/libcflat.h | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/kvm/test/lib/libcflat.h b/kvm/test/lib/libcflat.h
index 2e2a8bf..d0d3df2 100644
--- a/kvm/test/lib/libcflat.h
+++ b/kvm/test/lib/libcflat.h
@@ -28,6 +28,10 @@ typedef unsigned u32;
typedef unsigned long ulong;
typedef unsigned long long u64;
typedef unsigned long size_t;
+typedef _Bool bool;
+
+#define true 1
+#define false 0
extern void exit(int code);
extern void panic(char *fmt, ...);
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 4/8] test: add processor register access functions
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
` (2 preceding siblings ...)
2010-07-28 10:18 ` [PATCH 3/8] test: add type bool Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 10:18 ` [PATCH 5/8] test: make use of new processor.h header Avi Kivity
` (5 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/lib/x86/processor.h | 246 ++++++++++++++++++++++++++++++++++++++++++
1 files changed, 246 insertions(+), 0 deletions(-)
create mode 100644 kvm/test/lib/x86/processor.h
diff --git a/kvm/test/lib/x86/processor.h b/kvm/test/lib/x86/processor.h
new file mode 100644
index 0000000..ea44a9d
--- /dev/null
+++ b/kvm/test/lib/x86/processor.h
@@ -0,0 +1,246 @@
+#ifndef LIBCFLAT_PROCESSOR_H
+#define LIBCFLAT_PROCESSOR_H
+
+#include "libcflat.h"
+
+struct descriptor_table_ptr {
+ u16 limit;
+ ulong base;
+} __attribute__((packed));
+
+static inline void barrier(void)
+{
+ asm volatile ("" : : : "memory");
+}
+
+static inline u16 read_cs(void)
+{
+ unsigned val;
+
+ asm ("mov %%cs, %0" : "=mr"(val));
+ return val;
+}
+
+static inline u16 read_ds(void)
+{
+ unsigned val;
+
+ asm ("mov %%ds, %0" : "=mr"(val));
+ return val;
+}
+
+static inline u16 read_es(void)
+{
+ unsigned val;
+
+ asm ("mov %%es, %0" : "=mr"(val));
+ return val;
+}
+
+static inline u16 read_ss(void)
+{
+ unsigned val;
+
+ asm ("mov %%ss, %0" : "=mr"(val));
+ return val;
+}
+
+static inline u16 read_fs(void)
+{
+ unsigned val;
+
+ asm ("mov %%fs, %0" : "=mr"(val));
+ return val;
+}
+
+static inline u16 read_gs(void)
+{
+ unsigned val;
+
+ asm ("mov %%gs, %0" : "=mr"(val));
+ return val;
+}
+
+static inline void write_ds(unsigned val)
+{
+ asm ("mov %0, %%ds" : : "rm"(val) : "memory");
+}
+
+static inline void write_es(unsigned val)
+{
+ asm ("mov %0, %%es" : : "rm"(val) : "memory");
+}
+
+static inline void write_ss(unsigned val)
+{
+ asm ("mov %0, %%ss" : : "rm"(val) : "memory");
+}
+
+static inline void write_fs(unsigned val)
+{
+ asm ("mov %0, %%fs" : : "rm"(val) : "memory");
+}
+
+static inline void write_gs(unsigned val)
+{
+ asm ("mov %0, %%gs" : : "rm"(val) : "memory");
+}
+
+static inline u64 rdmsr(u32 index)
+{
+ u32 a, d;
+ asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
+ return a | ((u64)d << 32);
+}
+
+static inline void wrmsr(u32 index, u64 val)
+{
+ u32 a = val, d = val >> 32;
+ asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
+}
+
+static inline void write_cr0(ulong val)
+{
+ asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
+}
+
+static inline ulong read_cr0(void)
+{
+ ulong val;
+ asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
+ return val;
+}
+
+static inline void write_cr2(ulong val)
+{
+ asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
+}
+
+static inline ulong read_cr2(void)
+{
+ ulong val;
+ asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
+ return val;
+}
+
+static inline void write_cr3(ulong val)
+{
+ asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
+}
+
+static inline ulong read_cr3(void)
+{
+ ulong val;
+ asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
+ return val;
+}
+
+static inline void write_cr4(ulong val)
+{
+ asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
+}
+
+static inline ulong read_cr4(void)
+{
+ ulong val;
+ asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
+ return val;
+}
+
+static inline void write_cr8(ulong val)
+{
+ asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
+}
+
+static inline ulong read_cr8(void)
+{
+ ulong val;
+ asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
+ return val;
+}
+
+static inline void lgdt(const struct descriptor_table_ptr *ptr)
+{
+ asm volatile ("lgdt %0" : : "m"(*ptr));
+}
+
+static inline void sgdt(struct descriptor_table_ptr *ptr)
+{
+ asm volatile ("sgdt %0" : "=m"(*ptr));
+}
+
+static inline void lidt(const struct descriptor_table_ptr *ptr)
+{
+ asm volatile ("lidt %0" : : "m"(*ptr));
+}
+
+static inline void sidt(struct descriptor_table_ptr *ptr)
+{
+ asm volatile ("sidt %0" : "=m"(*ptr));
+}
+
+static inline void lldt(unsigned val)
+{
+ asm volatile ("lldt %0" : : "rm"(val));
+}
+
+static inline u16 sldt(void)
+{
+ u16 val;
+ asm volatile ("sldt %0" : "=rm"(val));
+ return val;
+}
+
+static inline void ltr(unsigned val)
+{
+ asm volatile ("ltr %0" : : "rm"(val));
+}
+
+static inline u16 str(void)
+{
+ u16 val;
+ asm volatile ("str %0" : "=rm"(val));
+ return val;
+}
+
+static inline void write_dr6(ulong val)
+{
+ asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
+}
+
+static inline ulong read_dr6(void)
+{
+ ulong val;
+ asm volatile ("mov %%dr6, %0" : "=r"(val));
+ return val;
+}
+
+static inline void write_dr7(ulong val)
+{
+ asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
+}
+
+static inline ulong read_dr7(void)
+{
+ ulong val;
+ asm volatile ("mov %%dr7, %0" : "=r"(val));
+ return val;
+}
+
+struct cpuid { u32 a, b, c, d; };
+
+static inline struct cpuid cpuid_indexed(u32 function, u32 index)
+{
+ struct cpuid r;
+ asm volatile ("cpuid"
+ : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
+ : "0"(function), "2"(index));
+ return r;
+}
+
+static inline struct cpuid cpuid(u32 function)
+{
+ return cpuid_indexed(function, 0);
+}
+
+#endif
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 5/8] test: make use of new processor.h header
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
` (3 preceding siblings ...)
2010-07-28 10:18 ` [PATCH 4/8] test: add processor register access functions Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 10:18 ` [PATCH 6/8] test: add svm definitions header Avi Kivity
` (4 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/x86/apic.c | 8 --------
kvm/test/x86/emulator.c | 10 +++++-----
kvm/test/x86/vm.c | 20 +++++++-------------
kvm/test/x86/vm.h | 43 ++-----------------------------------------
4 files changed, 14 insertions(+), 67 deletions(-)
diff --git a/kvm/test/x86/apic.c b/kvm/test/x86/apic.c
index b6718ec..48fa0f7 100644
--- a/kvm/test/x86/apic.c
+++ b/kvm/test/x86/apic.c
@@ -127,14 +127,6 @@ void test_enable_x2apic(void)
}
}
-static u16 read_cs(void)
-{
- u16 v;
-
- asm("mov %%cs, %0" : "=rm"(v));
- return v;
-}
-
static void init_idt(void)
{
struct {
diff --git a/kvm/test/x86/emulator.c b/kvm/test/x86/emulator.c
index db6a134..1483e3b 100644
--- a/kvm/test/x86/emulator.c
+++ b/kvm/test/x86/emulator.c
@@ -18,7 +18,7 @@ void report(const char *name, int result)
}
}
-static char str[] = "abcdefghijklmnop";
+static char st1[] = "abcdefghijklmnop";
void test_stringio()
{
@@ -27,18 +27,18 @@ void test_stringio()
"movw %0, %%dx \n\t"
"rep outsb \n\t"
: : "i"((short)TESTDEV_IO_PORT),
- "S"(str), "c"(sizeof(str) - 1));
+ "S"(st1), "c"(sizeof(st1) - 1));
asm volatile("inb %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
- report("outsb up", r == str[sizeof(str) - 2]); /* last char */
+ report("outsb up", r == st1[sizeof(st1) - 2]); /* last char */
asm volatile("std \n\t"
"movw %0, %%dx \n\t"
"rep outsb \n\t"
: : "i"((short)TESTDEV_IO_PORT),
- "S"(str + sizeof(str) - 2), "c"(sizeof(str) - 1));
+ "S"(st1 + sizeof(st1) - 2), "c"(sizeof(st1) - 1));
asm volatile("cld \n\t" : : );
asm volatile("in %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
- report("outsb down", r == str[0]);
+ report("outsb down", r == st1[0]);
}
void test_cmps_one(unsigned char *m1, unsigned char *m3)
diff --git a/kvm/test/x86/vm.c b/kvm/test/x86/vm.c
index c8f1553..62b3ba8 100644
--- a/kvm/test/x86/vm.c
+++ b/kvm/test/x86/vm.c
@@ -118,19 +118,13 @@ void install_page(unsigned long *cr3,
}
-struct gdt_table_descr
-{
- unsigned short len;
- unsigned long *table;
-} __attribute__((packed));
-
static inline void load_gdt(unsigned long *table, int nent)
{
- struct gdt_table_descr descr;
+ struct descriptor_table_ptr descr;
- descr.len = nent * 8 - 1;
- descr.table = table;
- asm volatile ( "lgdt %0" : : "m"(descr) );
+ descr.limit = nent * 8 - 1;
+ descr.base = (ulong)table;
+ lgdt(&descr);
}
#define SEG_CS_32 8
@@ -158,11 +152,11 @@ static void setup_mmu(unsigned long len)
install_page(cr3, phys, (void *)phys);
phys += PAGE_SIZE;
}
- load_cr3(virt_to_phys(cr3));
+ write_cr3(virt_to_phys(cr3));
#ifndef __x86_64__
- load_cr4(X86_CR4_PSE);
+ write_cr4(X86_CR4_PSE);
#endif
- load_cr0(X86_CR0_PG |X86_CR0_PE);
+ write_cr0(X86_CR0_PG |X86_CR0_PE);
printf("paging enabled\n");
printf("cr0 = %x\n", read_cr0());
diff --git a/kvm/test/x86/vm.h b/kvm/test/x86/vm.h
index 80dab8b..a3d2676 100644
--- a/kvm/test/x86/vm.h
+++ b/kvm/test/x86/vm.h
@@ -1,6 +1,8 @@
#ifndef VM_H
#define VM_H
+#include "processor.h"
+
#define PAGE_SIZE 4096ul
#ifdef __x86_64__
#define LARGE_PAGE_SIZE (512 * PAGE_SIZE)
@@ -41,45 +43,4 @@ static inline void *phys_to_virt(unsigned long phys)
return (void *)phys;
}
-
-static inline void load_cr3(unsigned long cr3)
-{
- asm ( "mov %0, %%cr3" : : "r"(cr3) );
-}
-
-static inline unsigned long read_cr3()
-{
- unsigned long cr3;
-
- asm volatile ( "mov %%cr3, %0" : "=r"(cr3) );
- return cr3;
-}
-
-static inline void load_cr0(unsigned long cr0)
-{
- asm volatile ( "mov %0, %%cr0" : : "r"(cr0) );
-}
-
-static inline unsigned long read_cr0()
-{
- unsigned long cr0;
-
- asm volatile ( "mov %%cr0, %0" : "=r"(cr0) );
- return cr0;
-}
-
-
-static inline void load_cr4(unsigned long cr4)
-{
- asm volatile ( "mov %0, %%cr4" : : "r"(cr4) );
-}
-
-static inline unsigned long read_cr4()
-{
- unsigned long cr4;
-
- asm volatile ( "mov %%cr4, %0" : "=r"(cr4) );
- return cr4;
-}
-
#endif
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 6/8] test: add svm definitions header
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
` (4 preceding siblings ...)
2010-07-28 10:18 ` [PATCH 5/8] test: make use of new processor.h header Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 10:18 ` [PATCH 7/8] test: add msr " Avi Kivity
` (3 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/x86/svm.h | 328 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 328 insertions(+), 0 deletions(-)
create mode 100644 kvm/test/x86/svm.h
diff --git a/kvm/test/x86/svm.h b/kvm/test/x86/svm.h
new file mode 100644
index 0000000..3fdc0d3
--- /dev/null
+++ b/kvm/test/x86/svm.h
@@ -0,0 +1,328 @@
+#ifndef __SVM_H
+#define __SVM_H
+
+#include "libcflat.h"
+
+enum {
+ INTERCEPT_INTR,
+ INTERCEPT_NMI,
+ INTERCEPT_SMI,
+ INTERCEPT_INIT,
+ INTERCEPT_VINTR,
+ INTERCEPT_SELECTIVE_CR0,
+ INTERCEPT_STORE_IDTR,
+ INTERCEPT_STORE_GDTR,
+ INTERCEPT_STORE_LDTR,
+ INTERCEPT_STORE_TR,
+ INTERCEPT_LOAD_IDTR,
+ INTERCEPT_LOAD_GDTR,
+ INTERCEPT_LOAD_LDTR,
+ INTERCEPT_LOAD_TR,
+ INTERCEPT_RDTSC,
+ INTERCEPT_RDPMC,
+ INTERCEPT_PUSHF,
+ INTERCEPT_POPF,
+ INTERCEPT_CPUID,
+ INTERCEPT_RSM,
+ INTERCEPT_IRET,
+ INTERCEPT_INTn,
+ INTERCEPT_INVD,
+ INTERCEPT_PAUSE,
+ INTERCEPT_HLT,
+ INTERCEPT_INVLPG,
+ INTERCEPT_INVLPGA,
+ INTERCEPT_IOIO_PROT,
+ INTERCEPT_MSR_PROT,
+ INTERCEPT_TASK_SWITCH,
+ INTERCEPT_FERR_FREEZE,
+ INTERCEPT_SHUTDOWN,
+ INTERCEPT_VMRUN,
+ INTERCEPT_VMMCALL,
+ INTERCEPT_VMLOAD,
+ INTERCEPT_VMSAVE,
+ INTERCEPT_STGI,
+ INTERCEPT_CLGI,
+ INTERCEPT_SKINIT,
+ INTERCEPT_RDTSCP,
+ INTERCEPT_ICEBP,
+ INTERCEPT_WBINVD,
+ INTERCEPT_MONITOR,
+ INTERCEPT_MWAIT,
+ INTERCEPT_MWAIT_COND,
+};
+
+
+struct __attribute__ ((__packed__)) vmcb_control_area {
+ u16 intercept_cr_read;
+ u16 intercept_cr_write;
+ u16 intercept_dr_read;
+ u16 intercept_dr_write;
+ u32 intercept_exceptions;
+ u64 intercept;
+ u8 reserved_1[42];
+ u16 pause_filter_count;
+ u64 iopm_base_pa;
+ u64 msrpm_base_pa;
+ u64 tsc_offset;
+ u32 asid;
+ u8 tlb_ctl;
+ u8 reserved_2[3];
+ u32 int_ctl;
+ u32 int_vector;
+ u32 int_state;
+ u8 reserved_3[4];
+ u32 exit_code;
+ u32 exit_code_hi;
+ u64 exit_info_1;
+ u64 exit_info_2;
+ u32 exit_int_info;
+ u32 exit_int_info_err;
+ u64 nested_ctl;
+ u8 reserved_4[16];
+ u32 event_inj;
+ u32 event_inj_err;
+ u64 nested_cr3;
+ u64 lbr_ctl;
+ u64 reserved_5;
+ u64 next_rip;
+ u8 reserved_6[816];
+};
+
+
+#define TLB_CONTROL_DO_NOTHING 0
+#define TLB_CONTROL_FLUSH_ALL_ASID 1
+
+#define V_TPR_MASK 0x0f
+
+#define V_IRQ_SHIFT 8
+#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
+
+#define V_INTR_PRIO_SHIFT 16
+#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
+
+#define V_IGN_TPR_SHIFT 20
+#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
+#define V_INTR_MASKING_SHIFT 24
+#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+#define SVM_INTERRUPT_SHADOW_MASK 1
+
+#define SVM_IOIO_STR_SHIFT 2
+#define SVM_IOIO_REP_SHIFT 3
+#define SVM_IOIO_SIZE_SHIFT 4
+#define SVM_IOIO_ASIZE_SHIFT 7
+
+#define SVM_IOIO_TYPE_MASK 1
+#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
+#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
+#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
+#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
+struct __attribute__ ((__packed__)) vmcb_seg {
+ u16 selector;
+ u16 attrib;
+ u32 limit;
+ u64 base;
+};
+
+struct __attribute__ ((__packed__)) vmcb_save_area {
+ struct vmcb_seg es;
+ struct vmcb_seg cs;
+ struct vmcb_seg ss;
+ struct vmcb_seg ds;
+ struct vmcb_seg fs;
+ struct vmcb_seg gs;
+ struct vmcb_seg gdtr;
+ struct vmcb_seg ldtr;
+ struct vmcb_seg idtr;
+ struct vmcb_seg tr;
+ u8 reserved_1[43];
+ u8 cpl;
+ u8 reserved_2[4];
+ u64 efer;
+ u8 reserved_3[112];
+ u64 cr4;
+ u64 cr3;
+ u64 cr0;
+ u64 dr7;
+ u64 dr6;
+ u64 rflags;
+ u64 rip;
+ u8 reserved_4[88];
+ u64 rsp;
+ u8 reserved_5[24];
+ u64 rax;
+ u64 star;
+ u64 lstar;
+ u64 cstar;
+ u64 sfmask;
+ u64 kernel_gs_base;
+ u64 sysenter_cs;
+ u64 sysenter_esp;
+ u64 sysenter_eip;
+ u64 cr2;
+ u8 reserved_6[32];
+ u64 g_pat;
+ u64 dbgctl;
+ u64 br_from;
+ u64 br_to;
+ u64 last_excp_from;
+ u64 last_excp_to;
+};
+
+struct __attribute__ ((__packed__)) vmcb {
+ struct vmcb_control_area control;
+ struct vmcb_save_area save;
+};
+
+#define SVM_CPUID_FEATURE_SHIFT 2
+#define SVM_CPUID_FUNC 0x8000000a
+
+#define SVM_VM_CR_SVM_DISABLE 4
+
+#define SVM_SELECTOR_S_SHIFT 4
+#define SVM_SELECTOR_DPL_SHIFT 5
+#define SVM_SELECTOR_P_SHIFT 7
+#define SVM_SELECTOR_AVL_SHIFT 8
+#define SVM_SELECTOR_L_SHIFT 9
+#define SVM_SELECTOR_DB_SHIFT 10
+#define SVM_SELECTOR_G_SHIFT 11
+
+#define SVM_SELECTOR_TYPE_MASK (0xf)
+#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
+#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
+#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
+#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
+#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
+#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
+#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
+
+#define SVM_SELECTOR_WRITE_MASK (1 << 1)
+#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
+#define SVM_SELECTOR_CODE_MASK (1 << 3)
+
+#define INTERCEPT_CR0_MASK 1
+#define INTERCEPT_CR3_MASK (1 << 3)
+#define INTERCEPT_CR4_MASK (1 << 4)
+#define INTERCEPT_CR8_MASK (1 << 8)
+
+#define INTERCEPT_DR0_MASK 1
+#define INTERCEPT_DR1_MASK (1 << 1)
+#define INTERCEPT_DR2_MASK (1 << 2)
+#define INTERCEPT_DR3_MASK (1 << 3)
+#define INTERCEPT_DR4_MASK (1 << 4)
+#define INTERCEPT_DR5_MASK (1 << 5)
+#define INTERCEPT_DR6_MASK (1 << 6)
+#define INTERCEPT_DR7_MASK (1 << 7)
+
+#define SVM_EVTINJ_VEC_MASK 0xff
+
+#define SVM_EVTINJ_TYPE_SHIFT 8
+#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_VALID (1 << 31)
+#define SVM_EVTINJ_VALID_ERR (1 << 11)
+
+#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK
+
+#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
+#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
+#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
+#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
+
+#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
+#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+
+#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
+#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
+
+#define SVM_EXIT_READ_CR0 0x000
+#define SVM_EXIT_READ_CR3 0x003
+#define SVM_EXIT_READ_CR4 0x004
+#define SVM_EXIT_READ_CR8 0x008
+#define SVM_EXIT_WRITE_CR0 0x010
+#define SVM_EXIT_WRITE_CR3 0x013
+#define SVM_EXIT_WRITE_CR4 0x014
+#define SVM_EXIT_WRITE_CR8 0x018
+#define SVM_EXIT_READ_DR0 0x020
+#define SVM_EXIT_READ_DR1 0x021
+#define SVM_EXIT_READ_DR2 0x022
+#define SVM_EXIT_READ_DR3 0x023
+#define SVM_EXIT_READ_DR4 0x024
+#define SVM_EXIT_READ_DR5 0x025
+#define SVM_EXIT_READ_DR6 0x026
+#define SVM_EXIT_READ_DR7 0x027
+#define SVM_EXIT_WRITE_DR0 0x030
+#define SVM_EXIT_WRITE_DR1 0x031
+#define SVM_EXIT_WRITE_DR2 0x032
+#define SVM_EXIT_WRITE_DR3 0x033
+#define SVM_EXIT_WRITE_DR4 0x034
+#define SVM_EXIT_WRITE_DR5 0x035
+#define SVM_EXIT_WRITE_DR6 0x036
+#define SVM_EXIT_WRITE_DR7 0x037
+#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_INTR 0x060
+#define SVM_EXIT_NMI 0x061
+#define SVM_EXIT_SMI 0x062
+#define SVM_EXIT_INIT 0x063
+#define SVM_EXIT_VINTR 0x064
+#define SVM_EXIT_CR0_SEL_WRITE 0x065
+#define SVM_EXIT_IDTR_READ 0x066
+#define SVM_EXIT_GDTR_READ 0x067
+#define SVM_EXIT_LDTR_READ 0x068
+#define SVM_EXIT_TR_READ 0x069
+#define SVM_EXIT_IDTR_WRITE 0x06a
+#define SVM_EXIT_GDTR_WRITE 0x06b
+#define SVM_EXIT_LDTR_WRITE 0x06c
+#define SVM_EXIT_TR_WRITE 0x06d
+#define SVM_EXIT_RDTSC 0x06e
+#define SVM_EXIT_RDPMC 0x06f
+#define SVM_EXIT_PUSHF 0x070
+#define SVM_EXIT_POPF 0x071
+#define SVM_EXIT_CPUID 0x072
+#define SVM_EXIT_RSM 0x073
+#define SVM_EXIT_IRET 0x074
+#define SVM_EXIT_SWINT 0x075
+#define SVM_EXIT_INVD 0x076
+#define SVM_EXIT_PAUSE 0x077
+#define SVM_EXIT_HLT 0x078
+#define SVM_EXIT_INVLPG 0x079
+#define SVM_EXIT_INVLPGA 0x07a
+#define SVM_EXIT_IOIO 0x07b
+#define SVM_EXIT_MSR 0x07c
+#define SVM_EXIT_TASK_SWITCH 0x07d
+#define SVM_EXIT_FERR_FREEZE 0x07e
+#define SVM_EXIT_SHUTDOWN 0x07f
+#define SVM_EXIT_VMRUN 0x080
+#define SVM_EXIT_VMMCALL 0x081
+#define SVM_EXIT_VMLOAD 0x082
+#define SVM_EXIT_VMSAVE 0x083
+#define SVM_EXIT_STGI 0x084
+#define SVM_EXIT_CLGI 0x085
+#define SVM_EXIT_SKINIT 0x086
+#define SVM_EXIT_RDTSCP 0x087
+#define SVM_EXIT_ICEBP 0x088
+#define SVM_EXIT_WBINVD 0x089
+#define SVM_EXIT_MONITOR 0x08a
+#define SVM_EXIT_MWAIT 0x08b
+#define SVM_EXIT_MWAIT_COND 0x08c
+#define SVM_EXIT_NPF 0x400
+
+#define SVM_EXIT_ERR -1
+
+#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
+
+#endif
+
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 7/8] test: add msr definitions header
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
` (5 preceding siblings ...)
2010-07-28 10:18 ` [PATCH 6/8] test: add svm definitions header Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 10:18 ` [PATCH 8/8] test: add svm tests Avi Kivity
` (2 subsequent siblings)
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/lib/x86/msr.h | 406 ++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 406 insertions(+), 0 deletions(-)
create mode 100644 kvm/test/lib/x86/msr.h
diff --git a/kvm/test/lib/x86/msr.h b/kvm/test/lib/x86/msr.h
new file mode 100644
index 0000000..509a421
--- /dev/null
+++ b/kvm/test/lib/x86/msr.h
@@ -0,0 +1,406 @@
+#ifndef _ASM_X86_MSR_INDEX_H
+#define _ASM_X86_MSR_INDEX_H
+
+/* CPU model specific register (MSR) numbers */
+
+/* x86-64 specific MSRs */
+#define MSR_EFER 0xc0000080 /* extended feature register */
+#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
+#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
+#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
+
+/* EFER bits: */
+#define _EFER_SCE 0 /* SYSCALL/SYSRET */
+#define _EFER_LME 8 /* Long mode enable */
+#define _EFER_LMA 10 /* Long mode active (read-only) */
+#define _EFER_NX 11 /* No execute enable */
+#define _EFER_SVME 12 /* Enable virtualization */
+#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
+#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+
+#define EFER_SCE (1<<_EFER_SCE)
+#define EFER_LME (1<<_EFER_LME)
+#define EFER_LMA (1<<_EFER_LMA)
+#define EFER_NX (1<<_EFER_NX)
+#define EFER_SVME (1<<_EFER_SVME)
+#define EFER_LMSLE (1<<_EFER_LMSLE)
+#define EFER_FFXSR (1<<_EFER_FFXSR)
+
+/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_PERFCTR0 0x000000c1
+#define MSR_IA32_PERFCTR1 0x000000c2
+#define MSR_FSB_FREQ 0x000000cd
+
+#define MSR_MTRRcap 0x000000fe
+#define MSR_IA32_BBL_CR_CTL 0x00000119
+
+#define MSR_IA32_SYSENTER_CS 0x00000174
+#define MSR_IA32_SYSENTER_ESP 0x00000175
+#define MSR_IA32_SYSENTER_EIP 0x00000176
+
+#define MSR_IA32_MCG_CAP 0x00000179
+#define MSR_IA32_MCG_STATUS 0x0000017a
+#define MSR_IA32_MCG_CTL 0x0000017b
+
+#define MSR_IA32_PEBS_ENABLE 0x000003f1
+#define MSR_IA32_DS_AREA 0x00000600
+#define MSR_IA32_PERF_CAPABILITIES 0x00000345
+
+#define MSR_MTRRfix64K_00000 0x00000250
+#define MSR_MTRRfix16K_80000 0x00000258
+#define MSR_MTRRfix16K_A0000 0x00000259
+#define MSR_MTRRfix4K_C0000 0x00000268
+#define MSR_MTRRfix4K_C8000 0x00000269
+#define MSR_MTRRfix4K_D0000 0x0000026a
+#define MSR_MTRRfix4K_D8000 0x0000026b
+#define MSR_MTRRfix4K_E0000 0x0000026c
+#define MSR_MTRRfix4K_E8000 0x0000026d
+#define MSR_MTRRfix4K_F0000 0x0000026e
+#define MSR_MTRRfix4K_F8000 0x0000026f
+#define MSR_MTRRdefType 0x000002ff
+
+#define MSR_IA32_CR_PAT 0x00000277
+
+#define MSR_IA32_DEBUGCTLMSR 0x000001d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
+#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
+#define MSR_IA32_LASTINTFROMIP 0x000001dd
+#define MSR_IA32_LASTINTTOIP 0x000001de
+
+/* DEBUGCTLMSR bits (others vary by model): */
+#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
+#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+#define DEBUGCTLMSR_TR (1UL << 6)
+#define DEBUGCTLMSR_BTS (1UL << 7)
+#define DEBUGCTLMSR_BTINT (1UL << 8)
+#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
+#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
+#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+
+#define MSR_IA32_MC0_CTL 0x00000400
+#define MSR_IA32_MC0_STATUS 0x00000401
+#define MSR_IA32_MC0_ADDR 0x00000402
+#define MSR_IA32_MC0_MISC 0x00000403
+
+#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
+#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
+#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
+#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
+
+/* These are consecutive and not in the normal 4er MCE bank block */
+#define MSR_IA32_MC0_CTL2 0x00000280
+#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
+
+#define CMCI_EN (1ULL << 30)
+#define CMCI_THRESHOLD_MASK 0xffffULL
+
+#define MSR_P6_PERFCTR0 0x000000c1
+#define MSR_P6_PERFCTR1 0x000000c2
+#define MSR_P6_EVNTSEL0 0x00000186
+#define MSR_P6_EVNTSEL1 0x00000187
+
+/* AMD64 MSRs. Not complete. See the architecture manual for a more
+ complete list. */
+
+#define MSR_AMD64_PATCH_LEVEL 0x0000008b
+#define MSR_AMD64_NB_CFG 0xc001001f
+#define MSR_AMD64_PATCH_LOADER 0xc0010020
+#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
+#define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD64_DC_CFG 0xc0011022
+#define MSR_AMD64_IBSFETCHCTL 0xc0011030
+#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
+#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
+#define MSR_AMD64_IBSOPCTL 0xc0011033
+#define MSR_AMD64_IBSOPRIP 0xc0011034
+#define MSR_AMD64_IBSOPDATA 0xc0011035
+#define MSR_AMD64_IBSOPDATA2 0xc0011036
+#define MSR_AMD64_IBSOPDATA3 0xc0011037
+#define MSR_AMD64_IBSDCLINAD 0xc0011038
+#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
+#define MSR_AMD64_IBSCTL 0xc001103a
+
+/* Fam 10h MSRs */
+#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
+#define FAM10H_MMIO_CONF_ENABLE (1<<0)
+#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
+#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
+#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
+#define FAM10H_MMIO_CONF_BASE_SHIFT 20
+#define MSR_FAM10H_NODE_ID 0xc001100c
+
+/* K8 MSRs */
+#define MSR_K8_TOP_MEM1 0xc001001a
+#define MSR_K8_TOP_MEM2 0xc001001d
+#define MSR_K8_SYSCFG 0xc0010010
+#define MSR_K8_INT_PENDING_MSG 0xc0010055
+/* C1E active bits in int pending message */
+#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
+#define MSR_K8_TSEG_ADDR 0xc0010112
+#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
+
+/* K7 MSRs */
+#define MSR_K7_EVNTSEL0 0xc0010000
+#define MSR_K7_PERFCTR0 0xc0010004
+#define MSR_K7_EVNTSEL1 0xc0010001
+#define MSR_K7_PERFCTR1 0xc0010005
+#define MSR_K7_EVNTSEL2 0xc0010002
+#define MSR_K7_PERFCTR2 0xc0010006
+#define MSR_K7_EVNTSEL3 0xc0010003
+#define MSR_K7_PERFCTR3 0xc0010007
+#define MSR_K7_CLK_CTL 0xc001001b
+#define MSR_K7_HWCR 0xc0010015
+#define MSR_K7_FID_VID_CTL 0xc0010041
+#define MSR_K7_FID_VID_STATUS 0xc0010042
+
+/* K6 MSRs */
+#define MSR_K6_EFER 0xc0000080
+#define MSR_K6_STAR 0xc0000081
+#define MSR_K6_WHCR 0xc0000082
+#define MSR_K6_UWCCR 0xc0000085
+#define MSR_K6_EPMR 0xc0000086
+#define MSR_K6_PSOR 0xc0000087
+#define MSR_K6_PFIR 0xc0000088
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x00000107
+#define MSR_IDT_FCR2 0x00000108
+#define MSR_IDT_FCR3 0x00000109
+#define MSR_IDT_FCR4 0x0000010a
+
+#define MSR_IDT_MCR0 0x00000110
+#define MSR_IDT_MCR1 0x00000111
+#define MSR_IDT_MCR2 0x00000112
+#define MSR_IDT_MCR3 0x00000113
+#define MSR_IDT_MCR4 0x00000114
+#define MSR_IDT_MCR5 0x00000115
+#define MSR_IDT_MCR6 0x00000116
+#define MSR_IDT_MCR7 0x00000117
+#define MSR_IDT_MCR_CTRL 0x00000120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x00001107
+#define MSR_VIA_LONGHAUL 0x0000110a
+#define MSR_VIA_RNG 0x0000110b
+#define MSR_VIA_BCR2 0x00001147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0x00000000
+#define MSR_IA32_P5_MC_TYPE 0x00000001
+#define MSR_IA32_TSC 0x00000010
+#define MSR_IA32_PLATFORM_ID 0x00000017
+#define MSR_IA32_EBL_CR_POWERON 0x0000002a
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+
+#define MSR_IA32_APICBASE 0x0000001b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x00000079
+#define MSR_IA32_UCODE_REV 0x0000008b
+
+#define MSR_IA32_PERF_STATUS 0x00000198
+#define MSR_IA32_PERF_CTL 0x00000199
+
+#define MSR_IA32_MPERF 0x000000e7
+#define MSR_IA32_APERF 0x000000e8
+
+#define MSR_IA32_THERM_CONTROL 0x0000019a
+#define MSR_IA32_THERM_INTERRUPT 0x0000019b
+
+#define THERM_INT_LOW_ENABLE (1 << 0)
+#define THERM_INT_HIGH_ENABLE (1 << 1)
+
+#define MSR_IA32_THERM_STATUS 0x0000019c
+
+#define THERM_STATUS_PROCHOT (1 << 0)
+
+#define MSR_THERM2_CTL 0x0000019d
+
+#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
+
+#define MSR_IA32_MISC_ENABLE 0x000001a0
+
+#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
+
+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
+#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1)
+#define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
+#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2)
+#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9)
+#define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39)
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x00000180
+#define MSR_IA32_MCG_EBX 0x00000181
+#define MSR_IA32_MCG_ECX 0x00000182
+#define MSR_IA32_MCG_EDX 0x00000183
+#define MSR_IA32_MCG_ESI 0x00000184
+#define MSR_IA32_MCG_EDI 0x00000185
+#define MSR_IA32_MCG_EBP 0x00000186
+#define MSR_IA32_MCG_ESP 0x00000187
+#define MSR_IA32_MCG_EFLAGS 0x00000188
+#define MSR_IA32_MCG_EIP 0x00000189
+#define MSR_IA32_MCG_RESERVED 0x0000018a
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 0x00000300
+#define MSR_P4_BPU_PERFCTR1 0x00000301
+#define MSR_P4_BPU_PERFCTR2 0x00000302
+#define MSR_P4_BPU_PERFCTR3 0x00000303
+#define MSR_P4_MS_PERFCTR0 0x00000304
+#define MSR_P4_MS_PERFCTR1 0x00000305
+#define MSR_P4_MS_PERFCTR2 0x00000306
+#define MSR_P4_MS_PERFCTR3 0x00000307
+#define MSR_P4_FLAME_PERFCTR0 0x00000308
+#define MSR_P4_FLAME_PERFCTR1 0x00000309
+#define MSR_P4_FLAME_PERFCTR2 0x0000030a
+#define MSR_P4_FLAME_PERFCTR3 0x0000030b
+#define MSR_P4_IQ_PERFCTR0 0x0000030c
+#define MSR_P4_IQ_PERFCTR1 0x0000030d
+#define MSR_P4_IQ_PERFCTR2 0x0000030e
+#define MSR_P4_IQ_PERFCTR3 0x0000030f
+#define MSR_P4_IQ_PERFCTR4 0x00000310
+#define MSR_P4_IQ_PERFCTR5 0x00000311
+#define MSR_P4_BPU_CCCR0 0x00000360
+#define MSR_P4_BPU_CCCR1 0x00000361
+#define MSR_P4_BPU_CCCR2 0x00000362
+#define MSR_P4_BPU_CCCR3 0x00000363
+#define MSR_P4_MS_CCCR0 0x00000364
+#define MSR_P4_MS_CCCR1 0x00000365
+#define MSR_P4_MS_CCCR2 0x00000366
+#define MSR_P4_MS_CCCR3 0x00000367
+#define MSR_P4_FLAME_CCCR0 0x00000368
+#define MSR_P4_FLAME_CCCR1 0x00000369
+#define MSR_P4_FLAME_CCCR2 0x0000036a
+#define MSR_P4_FLAME_CCCR3 0x0000036b
+#define MSR_P4_IQ_CCCR0 0x0000036c
+#define MSR_P4_IQ_CCCR1 0x0000036d
+#define MSR_P4_IQ_CCCR2 0x0000036e
+#define MSR_P4_IQ_CCCR3 0x0000036f
+#define MSR_P4_IQ_CCCR4 0x00000370
+#define MSR_P4_IQ_CCCR5 0x00000371
+#define MSR_P4_ALF_ESCR0 0x000003ca
+#define MSR_P4_ALF_ESCR1 0x000003cb
+#define MSR_P4_BPU_ESCR0 0x000003b2
+#define MSR_P4_BPU_ESCR1 0x000003b3
+#define MSR_P4_BSU_ESCR0 0x000003a0
+#define MSR_P4_BSU_ESCR1 0x000003a1
+#define MSR_P4_CRU_ESCR0 0x000003b8
+#define MSR_P4_CRU_ESCR1 0x000003b9
+#define MSR_P4_CRU_ESCR2 0x000003cc
+#define MSR_P4_CRU_ESCR3 0x000003cd
+#define MSR_P4_CRU_ESCR4 0x000003e0
+#define MSR_P4_CRU_ESCR5 0x000003e1
+#define MSR_P4_DAC_ESCR0 0x000003a8
+#define MSR_P4_DAC_ESCR1 0x000003a9
+#define MSR_P4_FIRM_ESCR0 0x000003a4
+#define MSR_P4_FIRM_ESCR1 0x000003a5
+#define MSR_P4_FLAME_ESCR0 0x000003a6
+#define MSR_P4_FLAME_ESCR1 0x000003a7
+#define MSR_P4_FSB_ESCR0 0x000003a2
+#define MSR_P4_FSB_ESCR1 0x000003a3
+#define MSR_P4_IQ_ESCR0 0x000003ba
+#define MSR_P4_IQ_ESCR1 0x000003bb
+#define MSR_P4_IS_ESCR0 0x000003b4
+#define MSR_P4_IS_ESCR1 0x000003b5
+#define MSR_P4_ITLB_ESCR0 0x000003b6
+#define MSR_P4_ITLB_ESCR1 0x000003b7
+#define MSR_P4_IX_ESCR0 0x000003c8
+#define MSR_P4_IX_ESCR1 0x000003c9
+#define MSR_P4_MOB_ESCR0 0x000003aa
+#define MSR_P4_MOB_ESCR1 0x000003ab
+#define MSR_P4_MS_ESCR0 0x000003c0
+#define MSR_P4_MS_ESCR1 0x000003c1
+#define MSR_P4_PMH_ESCR0 0x000003ac
+#define MSR_P4_PMH_ESCR1 0x000003ad
+#define MSR_P4_RAT_ESCR0 0x000003bc
+#define MSR_P4_RAT_ESCR1 0x000003bd
+#define MSR_P4_SAAT_ESCR0 0x000003ae
+#define MSR_P4_SAAT_ESCR1 0x000003af
+#define MSR_P4_SSU_ESCR0 0x000003be
+#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
+
+#define MSR_P4_TBPU_ESCR0 0x000003c2
+#define MSR_P4_TBPU_ESCR1 0x000003c3
+#define MSR_P4_TC_ESCR0 0x000003c4
+#define MSR_P4_TC_ESCR1 0x000003c5
+#define MSR_P4_U2L_ESCR0 0x000003b0
+#define MSR_P4_U2L_ESCR1 0x000003b1
+
+#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
+#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
+#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0 0x00001900
+
+/* Intel VT MSRs */
+#define MSR_IA32_VMX_BASIC 0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
+#define MSR_IA32_VMX_MISC 0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+
+/* AMD-V MSRs */
+
+#define MSR_VM_CR 0xc0010114
+#define MSR_VM_IGNNE 0xc0010115
+#define MSR_VM_HSAVE_PA 0xc0010117
+
+#endif /* _ASM_X86_MSR_INDEX_H */
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [PATCH 8/8] test: add svm tests
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
` (6 preceding siblings ...)
2010-07-28 10:18 ` [PATCH 7/8] test: add msr " Avi Kivity
@ 2010-07-28 10:18 ` Avi Kivity
2010-07-28 11:40 ` [PATCH 0/8] Nested SVM unit tests Roedel, Joerg
2010-07-29 16:55 ` Marcelo Tosatti
9 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 10:18 UTC (permalink / raw)
To: Joerg Roedel, Marcelo Tosatti, kvm
Signed-off-by: Avi Kivity <avi@redhat.com>
---
kvm/test/config-x86-common.mak | 2 +
kvm/test/config-x86_64.mak | 1 +
kvm/test/x86/svm.c | 180 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 183 insertions(+), 0 deletions(-)
create mode 100644 kvm/test/x86/svm.c
diff --git a/kvm/test/config-x86-common.mak b/kvm/test/config-x86-common.mak
index 00817dc..19bffd4 100644
--- a/kvm/test/config-x86-common.mak
+++ b/kvm/test/config-x86-common.mak
@@ -68,6 +68,8 @@ $(TEST_DIR)/xsave.flat: $(cstart.o) $(TEST_DIR)/idt.o $(TEST_DIR)/xsave.o
$(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
$(TEST_DIR)/print.o $(TEST_DIR)/vm.o
+$(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
+
arch_clean:
$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
diff --git a/kvm/test/config-x86_64.mak b/kvm/test/config-x86_64.mak
index 3ffbcc1..b99cf85 100644
--- a/kvm/test/config-x86_64.mak
+++ b/kvm/test/config-x86_64.mak
@@ -7,5 +7,6 @@ CFLAGS += -D__x86_64__
tests = $(TEST_DIR)/access.flat $(TEST_DIR)/apic.flat \
$(TEST_DIR)/emulator.flat $(TEST_DIR)/idt_test.flat \
$(TEST_DIR)/xsave.flat $(TEST_DIR)/rmap_chain.flat
+tests += $(TEST_DIR)/svm.flat
include config-x86-common.mak
diff --git a/kvm/test/x86/svm.c b/kvm/test/x86/svm.c
new file mode 100644
index 0000000..af0e60c
--- /dev/null
+++ b/kvm/test/x86/svm.c
@@ -0,0 +1,180 @@
+#include "svm.h"
+#include "libcflat.h"
+#include "processor.h"
+#include "msr.h"
+#include "vm.h"
+
+static void setup_svm(void)
+{
+ void *hsave = alloc_page();
+
+ wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
+ wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
+}
+
+static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
+ u64 base, u32 limit, u32 attr)
+{
+ seg->selector = selector;
+ seg->attrib = attr;
+ seg->limit = limit;
+ seg->base = base;
+}
+
+static void vmcb_ident(struct vmcb *vmcb)
+{
+ u64 vmcb_phys = virt_to_phys(vmcb);
+ struct vmcb_save_area *save = &vmcb->save;
+ struct vmcb_control_area *ctrl = &vmcb->control;
+ u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
+ | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
+ u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
+ | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
+ struct descriptor_table_ptr desc_table_ptr;
+
+ memset(vmcb, 0, sizeof(*vmcb));
+ asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
+ vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
+ vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
+ vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
+ vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
+ sgdt(&desc_table_ptr);
+ vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
+ sidt(&desc_table_ptr);
+ vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
+ save->cpl = 0;
+ save->efer = rdmsr(MSR_EFER);
+ save->cr4 = read_cr4();
+ save->cr3 = read_cr3();
+ save->cr0 = read_cr0();
+ save->dr7 = read_dr7();
+ save->dr6 = read_dr6();
+ save->cr2 = read_cr2();
+ save->g_pat = rdmsr(MSR_IA32_CR_PAT);
+ save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+ ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
+}
+
+struct test {
+ const char *name;
+ void (*prepare)(struct test *test);
+ void (*guest_func)(struct test *test);
+ bool (*finished)(struct test *test);
+ bool (*succeeded)(struct test *test);
+ struct vmcb *vmcb;
+ int exits;
+};
+
+static void test_thunk(struct test *test)
+{
+ test->guest_func(test);
+ asm volatile ("vmmcall" : : : "memory");
+}
+
+static bool test_run(struct test *test, struct vmcb *vmcb)
+{
+ u64 vmcb_phys = virt_to_phys(vmcb);
+ u64 guest_stack[10000];
+ bool success;
+
+ test->vmcb = vmcb;
+ test->prepare(test);
+ vmcb->save.rip = (ulong)test_thunk;
+ vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
+ do {
+ asm volatile (
+ "clgi \n\t"
+ "vmload \n\t"
+ "push %%rbp \n\t"
+ "push %1 \n\t"
+ "vmrun \n\t"
+ "pop %1 \n\t"
+ "pop %%rbp \n\t"
+ "vmsave \n\t"
+ "stgi"
+ : : "a"(vmcb_phys), "D"(test)
+ : "rbx", "rcx", "rdx", "rsi",
+ "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
+ "memory");
+ ++test->exits;
+ } while (!test->finished(test));
+
+ success = test->succeeded(test);
+
+ printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
+
+ return success;
+}
+
+static void default_prepare(struct test *test)
+{
+ vmcb_ident(test->vmcb);
+}
+
+static bool default_finished(struct test *test)
+{
+ return true; /* one vmexit */
+}
+
+static void null_test(struct test *test)
+{
+}
+
+static bool null_check(struct test *test)
+{
+ return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
+}
+
+static void prepare_no_vmrun_int(struct test *test)
+{
+ test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
+}
+
+static bool check_no_vmrun_int(struct test *test)
+{
+ return test->vmcb->control.exit_code == SVM_EXIT_ERR;
+}
+
+static void test_vmrun(struct test *test)
+{
+ asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
+}
+
+static bool check_vmrun(struct test *test)
+{
+ return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
+}
+
+static struct test tests[] = {
+ { "null", default_prepare, null_test, default_finished, null_check },
+ { "vmrun", default_prepare, test_vmrun, default_finished, check_vmrun },
+ { "vmrun intercept check", prepare_no_vmrun_int, null_test,
+ default_finished, check_no_vmrun_int },
+
+};
+
+int main(int ac, char **av)
+{
+ int i, nr, passed;
+ struct vmcb *vmcb;
+
+ setup_vm();
+
+ if (!(cpuid(0x80000001).c & 4)) {
+ printf("SVM not availble\n");
+ return 0;
+ }
+
+ setup_svm();
+
+ vmcb = alloc_page();
+
+ nr = ARRAY_SIZE(tests);
+ passed = 0;
+ for (i = 0; i < nr; ++i) {
+ passed += test_run(&tests[i], vmcb);
+ }
+
+ printf("\nSUMMARY: %d TESTS, %d FAILURES\n", nr, (nr - passed));
+ return passed == nr ? 0 : 1;
+}
--
1.7.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* Re: [PATCH 0/8] Nested SVM unit tests
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
` (7 preceding siblings ...)
2010-07-28 10:18 ` [PATCH 8/8] test: add svm tests Avi Kivity
@ 2010-07-28 11:40 ` Roedel, Joerg
2010-07-28 11:53 ` Avi Kivity
2010-07-29 16:55 ` Marcelo Tosatti
9 siblings, 1 reply; 14+ messages in thread
From: Roedel, Joerg @ 2010-07-28 11:40 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm@vger.kernel.org
On Wed, Jul 28, 2010 at 06:18:19AM -0400, Avi Kivity wrote:
> The following patchset adds a framework for nested SVM unit testing. As
> you may guess, future fixes and enhancements to nested SVM will require
> matching tests.
Cool stuff. I guess the third test failed (vmrun intercept check),
should be easy to fix. This is generally a very useful thing.
> Currently, our nested SVM implementation has a 66.67% success rate using
> these tests.
Okay, that's the downside :-) Anyway, are KVM changes not related to
nested-svm required to pass these tests too?
How about fixes for race conditions? We had bugs in the past with lost
interrupts in the L2 guest which only showed up under very special
conditions that may be hard to reproduce directly. They may depend on an
L1 interrupt becoming pending for example.
(And I think we still have one bug in this area left which is not yet
root-caused)
Joerg
--
Joerg Roedel - AMD Operating System Research Center
Advanced Micro Devices GmbH Einsteinring 24 85609 Dornach
General Managers: Alberto Bozzo, Andrew Bowd
Registration: Dornach, Landkr. Muenchen; Registerger. Muenchen, HRB Nr. 43632
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 0/8] Nested SVM unit tests
2010-07-28 11:40 ` [PATCH 0/8] Nested SVM unit tests Roedel, Joerg
@ 2010-07-28 11:53 ` Avi Kivity
2010-07-28 12:39 ` Roedel, Joerg
0 siblings, 1 reply; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 11:53 UTC (permalink / raw)
To: Roedel, Joerg; +Cc: Marcelo Tosatti, kvm@vger.kernel.org
On 07/28/2010 02:40 PM, Roedel, Joerg wrote:
> On Wed, Jul 28, 2010 at 06:18:19AM -0400, Avi Kivity wrote:
>> The following patchset adds a framework for nested SVM unit testing. As
>> you may guess, future fixes and enhancements to nested SVM will require
>> matching tests.
> Cool stuff. I guess the third test failed (vmrun intercept check),
> should be easy to fix. This is generally a very useful thing.
>
Yes.
>> Currently, our nested SVM implementation has a 66.67% success rate using
>> these tests.
> Okay, thats the downside :-) Anyway, are KVM changes not realated to
> nested-svm required to pass these tests too?
Yes. These tests are run as part of autotest (well, I forgot to update
kvm/test/x86/unittests.cfg). If something wants to be in 'master', it
needs to pass these tests. So it will be a good idea to add tests for
things that are liable to break as part of normal code churn, e.g. event
injection.
> How about fixes for race conditions? We had bugs in the past with lost
> interrupts in the L2 guest which only showed up under very special
> conditions that may be hard to reproduce directly. They may depend on an
> L1 interrupt becoming pending for example.
> (And I think we still have one bug in this area left which is not yet
> root-caused)
The test framework supports smp, so you can have the second vcpu do
nasty stuff. The difficult part is when the failure depends on host
state that is not visible to the guest. Perhaps we can add test-only
hypercalls that allow the guest to manipulate this state (drop shadows,
etc.)
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 0/8] Nested SVM unit tests
2010-07-28 11:53 ` Avi Kivity
@ 2010-07-28 12:39 ` Roedel, Joerg
2010-07-28 12:46 ` Avi Kivity
0 siblings, 1 reply; 14+ messages in thread
From: Roedel, Joerg @ 2010-07-28 12:39 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm@vger.kernel.org
On Wed, Jul 28, 2010 at 07:53:59AM -0400, Avi Kivity wrote:
> The test framework supports smp, so you can have the second vcpu do
> nasty stuff. The difficult part is when the failure depends on host
> state that is not visible to the guest. Perhaps we can add test-only
> hypercalls that allow the guest to manipulate this state (drop shadows,
> etc.)
I just tried this out and it works great so far. Should be quite easy to
add new tests to this framework (as long as the failure cases are no
race conditions). I guess the small multiboot-kernels run with an
identity mapped page table?
Joerg
--
Joerg Roedel - AMD Operating System Research Center
Advanced Micro Devices GmbH Einsteinring 24 85609 Dornach
General Managers: Alberto Bozzo, Andrew Bowd
Registration: Dornach, Landkr. Muenchen; Registerger. Muenchen, HRB Nr. 43632
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 0/8] Nested SVM unit tests
2010-07-28 12:39 ` Roedel, Joerg
@ 2010-07-28 12:46 ` Avi Kivity
0 siblings, 0 replies; 14+ messages in thread
From: Avi Kivity @ 2010-07-28 12:46 UTC (permalink / raw)
To: Roedel, Joerg; +Cc: Marcelo Tosatti, kvm@vger.kernel.org
On 07/28/2010 03:39 PM, Roedel, Joerg wrote:
> On Wed, Jul 28, 2010 at 07:53:59AM -0400, Avi Kivity wrote:
>> The test framework supports smp, so you can have the second vcpu do
>> nasty stuff. The difficult part is when the failure depends on host
>> state that is not visible to the guest. Perhaps we can add test-only
>> hypercalls that allow the guest to manipulate this state (drop shadows,
>> etc.)
> I just tried this out and it works great so far. Should be quite easy to
> add new tests to this framework (as long as the failure cases are no
> race conditions). I guess the small multiboot-kernels run with an
> identity mapped page table?
>
Yes. vm.c has some functions to add more complicated mappings.
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH 0/8] Nested SVM unit tests
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
` (8 preceding siblings ...)
2010-07-28 11:40 ` [PATCH 0/8] Nested SVM unit tests Roedel, Joerg
@ 2010-07-29 16:55 ` Marcelo Tosatti
9 siblings, 0 replies; 14+ messages in thread
From: Marcelo Tosatti @ 2010-07-29 16:55 UTC (permalink / raw)
To: Avi Kivity; +Cc: Joerg Roedel, kvm
On Wed, Jul 28, 2010 at 01:18:19PM +0300, Avi Kivity wrote:
> The following patchset adds a framework for nested SVM unit testing. As
> you may guess, future fixes and enhancements to nested SVM will require
> matching tests.
>
> Currently, our nested SVM implementation has a 66.67% success rate using
> these tests.
>
> Avi Kivity (8):
> test: move ARRAY_SIZE() to libcflat.h
> test: move memset() to libcflat
> test: add type bool
> test: add processor register access functions
> test: make use of new processor.h header
> test: add svm definitions header
> test: add msr definitions header
> test: add svm tests
Applied, thanks.
^ permalink raw reply [flat|nested] 14+ messages in thread
end of thread, other threads:[~2010-07-29 21:47 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-07-28 10:18 [PATCH 0/8] Nested SVM unit tests Avi Kivity
2010-07-28 10:18 ` [PATCH 1/8] test: move ARRAY_SIZE() to libcflat.h Avi Kivity
2010-07-28 10:18 ` [PATCH 2/8] test: move memset() to libcflat Avi Kivity
2010-07-28 10:18 ` [PATCH 3/8] test: add type bool Avi Kivity
2010-07-28 10:18 ` [PATCH 4/8] test: add processor register access functions Avi Kivity
2010-07-28 10:18 ` [PATCH 5/8] test: make use of new processor.h header Avi Kivity
2010-07-28 10:18 ` [PATCH 6/8] test: add svm definitions header Avi Kivity
2010-07-28 10:18 ` [PATCH 7/8] test: add msr " Avi Kivity
2010-07-28 10:18 ` [PATCH 8/8] test: add svm tests Avi Kivity
2010-07-28 11:40 ` [PATCH 0/8] Nested SVM unit tests Roedel, Joerg
2010-07-28 11:53 ` Avi Kivity
2010-07-28 12:39 ` Roedel, Joerg
2010-07-28 12:46 ` Avi Kivity
2010-07-29 16:55 ` Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox