kvm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases
@ 2013-08-15 11:45 Arthur Chunqi Li
  2013-08-15 11:45 ` [PATCH v2 1/4] kvm-unit-tests: VMX: Add test cases for PAT and EFER Arthur Chunqi Li
                   ` (4 more replies)
  0 siblings, 5 replies; 11+ messages in thread
From: Arthur Chunqi Li @ 2013-08-15 11:45 UTC (permalink / raw)
  To: kvm; +Cc: jan.kiszka, gleb, pbonzini, Arthur Chunqi Li

Add a series of test cases for nested VMX in kvm-unit-tests.

Arthur Chunqi Li (4):
  kvm-unit-tests: VMX: Add test cases for PAT and EFER
  kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing
  kvm-unit-tests: VMX: Add test cases for I/O bitmaps
  kvm-unit-tests: VMX: Add test cases for instruction interception

 lib/x86/vm.h    |    4 +
 x86/vmx.c       |    3 +-
 x86/vmx.h       |   20 +-
 x86/vmx_tests.c |  714 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 736 insertions(+), 5 deletions(-)

-- 
1.7.9.5


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 1/4] kvm-unit-tests: VMX: Add test cases for PAT and EFER
  2013-08-15 11:45 [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
@ 2013-08-15 11:45 ` Arthur Chunqi Li
  2013-08-15 11:45 ` [PATCH v2 2/4] kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing Arthur Chunqi Li
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 11+ messages in thread
From: Arthur Chunqi Li @ 2013-08-15 11:45 UTC (permalink / raw)
  To: kvm; +Cc: jan.kiszka, gleb, pbonzini, Arthur Chunqi Li

Add test cases for ENT_LOAD_PAT, ENT_LOAD_EFER, EXI_LOAD_PAT,
EXI_SAVE_PAT, EXI_LOAD_EFER, EXI_SAVE_EFER flags in enter/exit
control fields.

Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
---
 x86/vmx.h       |    7 +++
 x86/vmx_tests.c |  185 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 192 insertions(+)

diff --git a/x86/vmx.h b/x86/vmx.h
index 28595d8..18961f1 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -152,10 +152,12 @@ enum Encoding {
 	GUEST_DEBUGCTL		= 0x2802ul,
 	GUEST_DEBUGCTL_HI	= 0x2803ul,
 	GUEST_EFER		= 0x2806ul,
+	GUEST_PAT		= 0x2804ul,
 	GUEST_PERF_GLOBAL_CTRL	= 0x2808ul,
 	GUEST_PDPTE		= 0x280aul,
 
 	/* 64-Bit Host State */
+	HOST_PAT		= 0x2c00ul,
 	HOST_EFER		= 0x2c02ul,
 	HOST_PERF_GLOBAL_CTRL	= 0x2c04ul,
 
@@ -330,11 +332,15 @@ enum Ctrl_exi {
 	EXI_HOST_64             = 1UL << 9,
 	EXI_LOAD_PERF		= 1UL << 12,
 	EXI_INTA                = 1UL << 15,
+	EXI_SAVE_PAT		= 1UL << 18,
+	EXI_LOAD_PAT		= 1UL << 19,
+	EXI_SAVE_EFER		= 1UL << 20,
 	EXI_LOAD_EFER           = 1UL << 21,
 };
 
 enum Ctrl_ent {
 	ENT_GUEST_64            = 1UL << 9,
+	ENT_LOAD_PAT		= 1UL << 14,
 	ENT_LOAD_EFER           = 1UL << 15,
 };
 
@@ -354,6 +360,7 @@ enum Ctrl0 {
 	CPU_NMI_WINDOW		= 1ul << 22,
 	CPU_IO			= 1ul << 24,
 	CPU_IO_BITMAP		= 1ul << 25,
+	CPU_MSR_BITMAP		= 1ul << 28,
 	CPU_SECONDARY		= 1ul << 31,
 };
 
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index c1b39f4..61b0cef 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -1,4 +1,15 @@
 #include "vmx.h"
+#include "msr.h"
+#include "processor.h"
+#include "vm.h"
+
+u64 ia32_pat;
+u64 ia32_efer;
+
+static inline void vmcall()
+{
+	asm volatile("vmcall");
+}
 
 void basic_init()
 {
@@ -76,6 +87,176 @@ int vmenter_exit_handler()
 	return VMX_TEST_VMEXIT;
 }
 
+void msr_bmp_init()
+{
+	void *msr_bitmap;
+	u32 ctrl_cpu0;
+
+	msr_bitmap = alloc_page();
+	memset(msr_bitmap, 0x0, PAGE_SIZE);
+	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
+	ctrl_cpu0 |= CPU_MSR_BITMAP;
+	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
+	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
+}
+
+static void test_ctrl_pat_init()
+{
+	u64 ctrl_ent;
+	u64 ctrl_exi;
+
+	msr_bmp_init();
+	ctrl_ent = vmcs_read(ENT_CONTROLS);
+	ctrl_exi = vmcs_read(EXI_CONTROLS);
+	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
+	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
+	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
+	vmcs_write(GUEST_PAT, 0x0);
+	vmcs_write(HOST_PAT, ia32_pat);
+}
+
+static void test_ctrl_pat_main()
+{
+	u64 guest_ia32_pat;
+
+	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
+	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
+		printf("\tENT_LOAD_PAT is not supported.\n");
+	else {
+		if (guest_ia32_pat != 0) {
+			report("Entry load PAT", 0);
+			return;
+		}
+	}
+	wrmsr(MSR_IA32_CR_PAT, 0x6);
+	vmcall();
+	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
+	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
+		if (guest_ia32_pat != ia32_pat) {
+			report("Entry load PAT", 0);
+			return;
+		}
+		report("Entry load PAT", 1);
+	}
+}
+
+static int test_ctrl_pat_exit_handler()
+{
+	u64 guest_rip;
+	ulong reason;
+	u64 guest_pat;
+
+	guest_rip = vmcs_read(GUEST_RIP);
+	reason = vmcs_read(EXI_REASON) & 0xff;
+	switch (reason) {
+	case VMX_VMCALL:
+		guest_pat = vmcs_read(GUEST_PAT);
+		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
+			printf("\tEXI_SAVE_PAT is not supported\n");
+			vmcs_write(GUEST_PAT, 0x6);
+		} else {
+			if (guest_pat == 0x6)
+				report("Exit save PAT", 1);
+			else
+				report("Exit save PAT", 0);
+		}
+		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
+			printf("\tEXI_LOAD_PAT is not supported\n");
+		else {
+			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
+				report("Exit load PAT", 1);
+			else
+				report("Exit load PAT", 0);
+		}
+		vmcs_write(GUEST_PAT, ia32_pat);
+		vmcs_write(GUEST_RIP, guest_rip + 3);
+		return VMX_TEST_RESUME;
+	default:
+		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
+		break;
+	}
+	return VMX_TEST_VMEXIT;
+}
+
+static void test_ctrl_efer_init()
+{
+	u64 ctrl_ent;
+	u64 ctrl_exi;
+
+	msr_bmp_init();
+	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
+	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
+	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
+	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
+	ia32_efer = rdmsr(MSR_EFER);
+	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
+	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
+}
+
+static void test_ctrl_efer_main()
+{
+	u64 guest_ia32_efer;
+
+	guest_ia32_efer = rdmsr(MSR_EFER);
+	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
+		printf("\tENT_LOAD_EFER is not supported.\n");
+	else {
+		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
+			report("Entry load EFER", 0);
+			return;
+		}
+	}
+	wrmsr(MSR_EFER, ia32_efer);
+	vmcall();
+	guest_ia32_efer = rdmsr(MSR_EFER);
+	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
+		if (guest_ia32_efer != ia32_efer) {
+			report("Entry load EFER", 0);
+			return;
+		}
+		report("Entry load EFER", 1);
+	}
+}
+
+static int test_ctrl_efer_exit_handler()
+{
+	u64 guest_rip;
+	ulong reason;
+	u64 guest_efer;
+
+	guest_rip = vmcs_read(GUEST_RIP);
+	reason = vmcs_read(EXI_REASON) & 0xff;
+	switch (reason) {
+	case VMX_VMCALL:
+		guest_efer = vmcs_read(GUEST_EFER);
+		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
+			printf("\tEXI_SAVE_EFER is not supported\n");
+			vmcs_write(GUEST_EFER, ia32_efer);
+		} else {
+			if (guest_efer == ia32_efer)
+				report("Exit save EFER", 1);
+			else
+				report("Exit save EFER", 0);
+		}
+		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
+			printf("\tEXI_LOAD_EFER is not supported\n");
+			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
+		} else {
+			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
+				report("Exit load EFER", 1);
+			else
+				report("Exit load EFER", 0);
+		}
+		vmcs_write(GUEST_PAT, ia32_efer);
+		vmcs_write(GUEST_RIP, guest_rip + 3);
+		return VMX_TEST_RESUME;
+	default:
+		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
+		break;
+	}
+	return VMX_TEST_VMEXIT;
+}
+
 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs
    basic_* just implement some basic functions */
 struct vmx_test vmx_tests[] = {
@@ -83,5 +264,9 @@ struct vmx_test vmx_tests[] = {
 		basic_syscall_handler, {0} },
 	{ "vmenter", basic_init, vmenter_main, vmenter_exit_handler,
 		basic_syscall_handler, {0} },
+	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
+		test_ctrl_pat_exit_handler, basic_syscall_handler, {0} },
+	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
+		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
 	{ NULL, NULL, NULL, NULL, NULL, {0} },
 };
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 2/4] kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing
  2013-08-15 11:45 [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
  2013-08-15 11:45 ` [PATCH v2 1/4] kvm-unit-tests: VMX: Add test cases for PAT and EFER Arthur Chunqi Li
@ 2013-08-15 11:45 ` Arthur Chunqi Li
  2013-09-09 12:36   ` Paolo Bonzini
  2013-08-15 11:45 ` [PATCH v2 3/4] kvm-unit-tests: VMX: Add test cases for I/O bitmaps Arthur Chunqi Li
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 11+ messages in thread
From: Arthur Chunqi Li @ 2013-08-15 11:45 UTC (permalink / raw)
  To: kvm; +Cc: jan.kiszka, gleb, pbonzini, Arthur Chunqi Li

Add testing for CR0/4 shadowing. Two types of flags in CR0/4 are
tested: flags owned and shadowed by L1. They are treated differently
in KVM. We test one flag of both types in CR0 (TS and MP) and CR4
(DE and TSD) with read through, read shadow, write through, write
shadow (same as and different from shadowed value).

Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
---
 lib/x86/vm.h    |    4 +
 x86/vmx_tests.c |  218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 222 insertions(+)

diff --git a/lib/x86/vm.h b/lib/x86/vm.h
index eff6f72..6e0ce2b 100644
--- a/lib/x86/vm.h
+++ b/lib/x86/vm.h
@@ -17,9 +17,13 @@
 #define PTE_ADDR    (0xffffffffff000ull)
 
 #define X86_CR0_PE      0x00000001
+#define X86_CR0_MP      0x00000002
+#define X86_CR0_TS      0x00000008
 #define X86_CR0_WP      0x00010000
 #define X86_CR0_PG      0x80000000
 #define X86_CR4_VMXE   0x00000001
+#define X86_CR4_TSD     0x00000004
+#define X86_CR4_DE      0x00000008
 #define X86_CR4_PSE     0x00000010
 #define X86_CR4_PAE     0x00000020
 #define X86_CR4_PCIDE  0x00020000
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 61b0cef..a5cc353 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -5,12 +5,20 @@
 
 u64 ia32_pat;
 u64 ia32_efer;
+volatile u32 stage;
 
 static inline void vmcall()
 {
 	asm volatile("vmcall");
 }
 
+static inline void set_stage(u32 s)
+{
+	barrier();
+	stage = s;
+	barrier();
+}
+
 void basic_init()
 {
 }
@@ -257,6 +265,214 @@ static int test_ctrl_efer_exit_handler()
 	return VMX_TEST_VMEXIT;
 }
 
+u32 guest_cr0, guest_cr4;
+
+static void cr_shadowing_main()
+{
+	u32 cr0, cr4, tmp;
+
+	// Test read through
+	set_stage(0);
+	guest_cr0 = read_cr0();
+	if (stage == 1)
+		report("Read through CR0", 0);
+	else
+		vmcall();
+	set_stage(1);
+	guest_cr4 = read_cr4();
+	if (stage == 2)
+		report("Read through CR4", 0);
+	else
+		vmcall();
+	// Test write through
+	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
+	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
+	set_stage(2);
+	write_cr0(guest_cr0);
+	if (stage == 3)
+		report("Write throuth CR0", 0);
+	else
+		vmcall();
+	set_stage(3);
+	write_cr4(guest_cr4);
+	if (stage == 4)
+		report("Write through CR4", 0);
+	else
+		vmcall();
+	// Test read shadow
+	set_stage(4);
+	vmcall();
+	cr0 = read_cr0();
+	if (stage != 5) {
+		if (cr0 == guest_cr0)
+			report("Read shadowing CR0", 1);
+		else
+			report("Read shadowing CR0", 0);
+	}
+	set_stage(5);
+	cr4 = read_cr4();
+	if (stage != 6) {
+		if (cr4 == guest_cr4)
+			report("Read shadowing CR4", 1);
+		else
+			report("Read shadowing CR4", 0);
+	}
+	// Test write shadow (same value with shadow)
+	set_stage(6);
+	write_cr0(guest_cr0);
+	if (stage == 7)
+		report("Write shadowing CR0 (same value with shadow)", 0);
+	else
+		vmcall();
+	set_stage(7);
+	write_cr4(guest_cr4);
+	if (stage == 8)
+		report("Write shadowing CR4 (same value with shadow)", 0);
+	else
+		vmcall();
+	// Test write shadow (different value)
+	set_stage(8);
+	tmp = guest_cr0 ^ X86_CR0_TS;
+	asm volatile("mov %0, %%rsi\n\t"
+		"mov %%rsi, %%cr0\n\t"
+		::"m"(tmp)
+		:"rsi", "memory", "cc");
+	if (stage != 9)
+		report("Write shadowing different X86_CR0_TS", 0);
+	else
+		report("Write shadowing different X86_CR0_TS", 1);
+	set_stage(9);
+	tmp = guest_cr0 ^ X86_CR0_MP;
+	asm volatile("mov %0, %%rsi\n\t"
+		"mov %%rsi, %%cr0\n\t"
+		::"m"(tmp)
+		:"rsi", "memory", "cc");
+	if (stage != 10)
+		report("Write shadowing different X86_CR0_MP", 0);
+	else
+		report("Write shadowing different X86_CR0_MP", 1);
+	set_stage(10);
+	tmp = guest_cr4 ^ X86_CR4_TSD;
+	asm volatile("mov %0, %%rsi\n\t"
+		"mov %%rsi, %%cr4\n\t"
+		::"m"(tmp)
+		:"rsi", "memory", "cc");
+	if (stage != 11)
+		report("Write shadowing different X86_CR4_TSD", 0);
+	else
+		report("Write shadowing different X86_CR4_TSD", 1);
+	set_stage(11);
+	tmp = guest_cr4 ^ X86_CR4_DE;
+	asm volatile("mov %0, %%rsi\n\t"
+		"mov %%rsi, %%cr4\n\t"
+		::"m"(tmp)
+		:"rsi", "memory", "cc");
+	if (stage != 12)
+		report("Write shadowing different X86_CR4_DE", 0);
+	else
+		report("Write shadowing different X86_CR4_DE", 1);
+}
+
+static int cr_shadowing_exit_handler()
+{
+	u64 guest_rip;
+	ulong reason;
+	u32 insn_len;
+	u32 exit_qual;
+
+	guest_rip = vmcs_read(GUEST_RIP);
+	reason = vmcs_read(EXI_REASON) & 0xff;
+	insn_len = vmcs_read(EXI_INST_LEN);
+	exit_qual = vmcs_read(EXI_QUALIFICATION);
+	switch (reason) {
+	case VMX_VMCALL:
+		switch (stage) {
+		case 0:
+			if (guest_cr0 == vmcs_read(GUEST_CR0))
+				report("Read through CR0", 1);
+			else
+				report("Read through CR0", 0);
+			break;
+		case 1:
+			if (guest_cr4 == vmcs_read(GUEST_CR4))
+				report("Read through CR4", 1);
+			else
+				report("Read through CR4", 0);
+			break;
+		case 2:
+			if (guest_cr0 == vmcs_read(GUEST_CR0))
+				report("Write through CR0", 1);
+			else
+				report("Write through CR0", 0);
+			break;
+		case 3:
+			if (guest_cr4 == vmcs_read(GUEST_CR4))
+				report("Write through CR4", 1);
+			else
+				report("Write through CR4", 0);
+			break;
+		case 4:
+			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
+			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
+			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
+			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
+			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
+			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
+			break;
+		case 6:
+			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
+				report("Write shadowing CR0 (same value)", 1);
+			else
+				report("Write shadowing CR0 (same value)", 0);
+			break;
+		case 7:
+			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
+				report("Write shadowing CR4 (same value)", 1);
+			else
+				report("Write shadowing CR4 (same value)", 0);
+			break;
+		}
+		vmcs_write(GUEST_RIP, guest_rip + insn_len);
+		return VMX_TEST_RESUME;
+	case VMX_CR:
+		switch (stage) {
+		case 4:
+			report("Read shadowing CR0", 0);
+			set_stage(stage + 1);
+			break;
+		case 5:
+			report("Read shadowing CR4", 0);
+			set_stage(stage + 1);
+			break;
+		case 6:
+			report("Write shadowing CR0 (same value)", 0);
+			set_stage(stage + 1);
+			break;
+		case 7:
+			report("Write shadowing CR4 (same value)", 0);
+			set_stage(stage + 1);
+			break;
+		case 8:
+		case 9:
+			// 0x600 encodes "mov %esi, %cr0"
+			if (exit_qual == 0x600)
+				set_stage(stage + 1);
+			break;
+		case 10:
+		case 11:
+			// 0x604 encodes "mov %esi, %cr4"
+			if (exit_qual == 0x604)
+				set_stage(stage + 1);
+		}
+		vmcs_write(GUEST_RIP, guest_rip + insn_len);
+		return VMX_TEST_RESUME;
+	default:
+		printf("Unknown exit reason, %d\n", reason);
+		print_vmexit_info();
+	}
+	return VMX_TEST_VMEXIT;
+}
+
 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs
    basic_* just implement some basic functions */
 struct vmx_test vmx_tests[] = {
@@ -268,5 +484,7 @@ struct vmx_test vmx_tests[] = {
 		test_ctrl_pat_exit_handler, basic_syscall_handler, {0} },
 	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
 		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
+	{ "CR shadowing", basic_init, cr_shadowing_main,
+		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
 	{ NULL, NULL, NULL, NULL, NULL, {0} },
 };
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 3/4] kvm-unit-tests: VMX: Add test cases for I/O bitmaps
  2013-08-15 11:45 [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
  2013-08-15 11:45 ` [PATCH v2 1/4] kvm-unit-tests: VMX: Add test cases for PAT and EFER Arthur Chunqi Li
  2013-08-15 11:45 ` [PATCH v2 2/4] kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing Arthur Chunqi Li
@ 2013-08-15 11:45 ` Arthur Chunqi Li
  2013-09-09 12:39   ` Paolo Bonzini
  2013-08-15 11:45 ` [PATCH v2 4/4] kvm-unit-tests: VMX: Add test cases for instruction interception Arthur Chunqi Li
  2013-09-02  9:06 ` [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
  4 siblings, 1 reply; 11+ messages in thread
From: Arthur Chunqi Li @ 2013-08-15 11:45 UTC (permalink / raw)
  To: kvm; +Cc: jan.kiszka, gleb, pbonzini, Arthur Chunqi Li

Add test cases for I/O bitmaps, including corner cases.

Test includes: pass & trap, in & out, different I/O width, low & high
I/O bitmap, partial I/O pass, overrun (inl 0xFFFF).

Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
---
 x86/vmx.h       |    6 +--
 x86/vmx_tests.c |  159 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 162 insertions(+), 3 deletions(-)

diff --git a/x86/vmx.h b/x86/vmx.h
index 18961f1..dba8b20 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -417,15 +417,15 @@ enum Ctrl1 {
 	"popf\n\t"
 
 #define VMX_IO_SIZE_MASK		0x7
-#define _VMX_IO_BYTE			1
-#define _VMX_IO_WORD			2
+#define _VMX_IO_BYTE			0
+#define _VMX_IO_WORD			1
 #define _VMX_IO_LONG			3
 #define VMX_IO_DIRECTION_MASK		(1ul << 3)
 #define VMX_IO_IN			(1ul << 3)
 #define VMX_IO_OUT			0
 #define VMX_IO_STRING			(1ul << 4)
 #define VMX_IO_REP			(1ul << 5)
-#define VMX_IO_OPRAND_DX		(1ul << 6)
+#define VMX_IO_OPRAND_IMM		(1ul << 6)
 #define VMX_IO_PORT_MASK		0xFFFF0000
 #define VMX_IO_PORT_SHIFT		16
 
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index a5cc353..cd4dd99 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -2,10 +2,13 @@
 #include "msr.h"
 #include "processor.h"
 #include "vm.h"
+#include "io.h"
 
 u64 ia32_pat;
 u64 ia32_efer;
 volatile u32 stage;
+void *io_bitmap_a, *io_bitmap_b;
+u16 ioport;
 
 static inline void vmcall()
 {
@@ -473,6 +476,160 @@ static int cr_shadowing_exit_handler()
 	return VMX_TEST_VMEXIT;
 }
 
+static void iobmp_init()
+{
+	u32 ctrl_cpu0;
+
+	io_bitmap_a = alloc_page();
+	io_bitmap_a = alloc_page();
+	memset(io_bitmap_a, 0x0, PAGE_SIZE);
+	memset(io_bitmap_b, 0x0, PAGE_SIZE);
+	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
+	ctrl_cpu0 |= CPU_IO_BITMAP;
+	ctrl_cpu0 &= (~CPU_IO);
+	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
+	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
+	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
+}
+
+static void iobmp_main()
+{
+	// stage 0, test IO pass
+	set_stage(0);
+	inb(0x5000);
+	outb(0x0, 0x5000);
+	if (stage != 0)
+		report("I/O bitmap - I/O pass", 0);
+	else
+		report("I/O bitmap - I/O pass", 1);
+	// test IO width, in/out
+	((u8 *)io_bitmap_a)[0] = 0xFF;
+	set_stage(2);
+	inb(0x0);
+	if (stage != 3)
+		report("I/O bitmap - trap in", 0);
+	else
+		report("I/O bitmap - trap in", 1);
+	set_stage(3);
+	outw(0x0, 0x0);
+	if (stage != 4)
+		report("I/O bitmap - trap out", 0);
+	else
+		report("I/O bitmap - trap out", 1);
+	set_stage(4);
+	inl(0x0);
+	if (stage != 5)
+		report("I/O bitmap - I/O width, long", 0);
+	// test low/high IO port
+	set_stage(5);
+	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
+	inb(0x5000);
+	if (stage == 6)
+		report("I/O bitmap - I/O port, low part", 1);
+	else
+		report("I/O bitmap - I/O port, low part", 0);
+	set_stage(6);
+	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
+	inb(0x9000);
+	if (stage == 7)
+		report("I/O bitmap - I/O port, high part", 1);
+	else
+		report("I/O bitmap - I/O port, high part", 0);
+	// test partial pass
+	set_stage(7);
+	inl(0x4FFF);
+	if (stage == 8)
+		report("I/O bitmap - partial pass", 1);
+	else
+		report("I/O bitmap - partial pass", 0);
+	// test overrun
+	set_stage(8);
+	memset(io_bitmap_a, 0x0, PAGE_SIZE);
+	memset(io_bitmap_b, 0x0, PAGE_SIZE);
+	inl(0xFFFF);
+	if (stage == 9)
+		report("I/O bitmap - overrun", 1);
+	else
+		report("I/O bitmap - overrun", 0);
+	
+	return;
+}
+
+static int iobmp_exit_handler()
+{
+	u64 guest_rip;
+	ulong reason, exit_qual;
+	u32 insn_len;
+
+	guest_rip = vmcs_read(GUEST_RIP);
+	reason = vmcs_read(EXI_REASON) & 0xff;
+	exit_qual = vmcs_read(EXI_QUALIFICATION);
+	insn_len = vmcs_read(EXI_INST_LEN);
+	switch (reason) {
+	case VMX_IO:
+		switch (stage) {
+		case 2:
+			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
+				report("I/O bitmap - I/O width, byte", 0);
+			else
+				report("I/O bitmap - I/O width, byte", 1);
+			if (!(exit_qual & VMX_IO_IN))
+				report("I/O bitmap - I/O direction, in", 0);
+			else
+				report("I/O bitmap - I/O direction, in", 1);
+			set_stage(stage + 1);
+			break;
+		case 3:
+			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
+				report("I/O bitmap - I/O width, word", 0);
+			else
+				report("I/O bitmap - I/O width, word", 1);
+			if (!(exit_qual & VMX_IO_IN))
+				report("I/O bitmap - I/O direction, out", 1);
+			else
+				report("I/O bitmap - I/O direction, out", 0);
+			set_stage(stage + 1);
+			break;
+		case 4:
+			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
+				report("I/O bitmap - I/O width, long", 0);
+			else
+				report("I/O bitmap - I/O width, long", 1);
+			set_stage(stage + 1);
+			break;
+		case 5:
+			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
+				set_stage(stage + 1);
+			break;
+		case 6:
+			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
+				set_stage(stage + 1);
+			break;
+		case 7:
+			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
+				set_stage(stage + 1);
+			break;
+		case 8:
+			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
+				set_stage(stage + 1);
+			break;
+		case 0:
+		case 1:
+			set_stage(stage + 1);
+		default:
+			// Should not reach here
+			break;
+		}
+		vmcs_write(GUEST_RIP, guest_rip + insn_len);
+		return VMX_TEST_RESUME;
+	default:
+		printf("guest_rip = 0x%llx\n", guest_rip);
+		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
+		break;
+	}
+	return VMX_TEST_VMEXIT;
+}
+
 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs
    basic_* just implement some basic functions */
 struct vmx_test vmx_tests[] = {
@@ -486,5 +643,7 @@ struct vmx_test vmx_tests[] = {
 		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
 	{ "CR shadowing", basic_init, cr_shadowing_main,
 		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
+	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
+		basic_syscall_handler, {0} },
 	{ NULL, NULL, NULL, NULL, NULL, {0} },
 };
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 4/4] kvm-unit-tests: VMX: Add test cases for instruction interception
  2013-08-15 11:45 [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
                   ` (2 preceding siblings ...)
  2013-08-15 11:45 ` [PATCH v2 3/4] kvm-unit-tests: VMX: Add test cases for I/O bitmaps Arthur Chunqi Li
@ 2013-08-15 11:45 ` Arthur Chunqi Li
  2013-09-09 12:42   ` Paolo Bonzini
  2013-09-02  9:06 ` [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
  4 siblings, 1 reply; 11+ messages in thread
From: Arthur Chunqi Li @ 2013-08-15 11:45 UTC (permalink / raw)
  To: kvm; +Cc: jan.kiszka, gleb, pbonzini, Arthur Chunqi Li

Add test cases for instruction interception, including four types:
1. Primary Processor-Based VM-Execution Controls (HLT/INVLPG/MWAIT/
RDPMC/RDTSC/MONITOR/PAUSE)
2. Secondary Processor-Based VM-Execution Controls (WBINVD)
3. No control flag, always trap (CPUID/INVD)
4. Instructions always pass

Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
---
 x86/vmx.c       |    3 +-
 x86/vmx.h       |    7 +++
 x86/vmx_tests.c |  152 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 160 insertions(+), 2 deletions(-)

diff --git a/x86/vmx.c b/x86/vmx.c
index ca36d35..c346070 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -336,8 +336,7 @@ static void init_vmx(void)
 			: MSR_IA32_VMX_ENTRY_CTLS);
 	ctrl_cpu_rev[0].val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PROC
 			: MSR_IA32_VMX_PROCBASED_CTLS);
-	if (ctrl_cpu_rev[0].set & CPU_SECONDARY)
-		ctrl_cpu_rev[1].val = rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
+	ctrl_cpu_rev[1].val = rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
 	if (ctrl_cpu_rev[1].set & CPU_EPT || ctrl_cpu_rev[1].set & CPU_VPID)
 		ept_vpid.val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
 
diff --git a/x86/vmx.h b/x86/vmx.h
index dba8b20..2784ac6 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -354,12 +354,17 @@ enum Ctrl0 {
 	CPU_INTR_WINDOW		= 1ul << 2,
 	CPU_HLT			= 1ul << 7,
 	CPU_INVLPG		= 1ul << 9,
+	CPU_MWAIT		= 1ul << 10,
+	CPU_RDPMC		= 1ul << 11,
+	CPU_RDTSC		= 1ul << 12,
 	CPU_CR3_LOAD		= 1ul << 15,
 	CPU_CR3_STORE		= 1ul << 16,
 	CPU_TPR_SHADOW		= 1ul << 21,
 	CPU_NMI_WINDOW		= 1ul << 22,
 	CPU_IO			= 1ul << 24,
 	CPU_IO_BITMAP		= 1ul << 25,
+	CPU_MONITOR		= 1ul << 29,
+	CPU_PAUSE		= 1ul << 30,
 	CPU_MSR_BITMAP		= 1ul << 28,
 	CPU_SECONDARY		= 1ul << 31,
 };
@@ -368,6 +373,8 @@ enum Ctrl1 {
 	CPU_EPT			= 1ul << 1,
 	CPU_VPID		= 1ul << 5,
 	CPU_URG			= 1ul << 7,
+	CPU_WBINVD		= 1ul << 6,
+	CPU_RDRAND		= 1ul << 11,
 };
 
 #define SAVE_GPR				\
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index cd4dd99..be3e3b4 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -22,6 +22,16 @@ static inline void set_stage(u32 s)
 	barrier();
 }
 
+static inline u32 get_stage()
+{
+	u32 s;
+
+	barrier();
+	s = stage;
+	barrier();
+	return s;
+}
+
 void basic_init()
 {
 }
@@ -630,6 +640,146 @@ static int iobmp_exit_handler()
 	return VMX_TEST_VMEXIT;
 }
 
+#define INSN_CPU0		0
+#define INSN_CPU1		1
+#define INSN_ALWAYS_TRAP	2
+#define INSN_NEVER_TRAP		3
+
+#define FIELD_EXIT_QUAL		0
+#define FIELD_INSN_INFO		1
+
+asm(
+	"insn_hlt: hlt;ret\n\t"
+	"insn_invlpg: invlpg 0x12345678;ret\n\t"
+	"insn_mwait: mwait;ret\n\t"
+	"insn_rdpmc: rdpmc;ret\n\t"
+	"insn_rdtsc: rdtsc;ret\n\t"
+	"insn_monitor: monitor;ret\n\t"
+	"insn_pause: pause;ret\n\t"
+	"insn_wbinvd: wbinvd;ret\n\t"
+	"insn_cpuid: cpuid;ret\n\t"
+	"insn_invd: invd;ret\n\t"
+);
+extern void insn_hlt();
+extern void insn_invlpg();
+extern void insn_mwait();
+extern void insn_rdpmc();
+extern void insn_rdtsc();
+extern void insn_monitor();
+extern void insn_pause();
+extern void insn_wbinvd();
+extern void insn_cpuid();
+extern void insn_invd();
+
+u32 cur_insn;
+
+struct insn_table {
+	const char *name;
+	u32 flag;
+	void (*insn_func)();
+	u32 type;
+	u32 reason;
+	ulong exit_qual;
+	u32 insn_info;
+	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to efines
+	// which field need to be tested, reason is always tested
+	u32 test_field;
+};
+
+static struct insn_table insn_table[] = {
+	// Flags for Primary Processor-Based VM-Execution Controls
+	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
+	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
+		0x12345678, 0, FIELD_EXIT_QUAL},
+	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
+	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
+	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
+	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
+	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
+	// Flags for Secondary Processor-Based VM-Execution Controls
+	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
+	// Instructions always trap
+	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
+	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
+	// Instructions never trap
+	{NULL},
+};
+
+static void insn_intercept_init()
+{
+	u32 ctrl_cpu[2];
+
+	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
+	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
+		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
+	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
+	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
+	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
+	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
+	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
+	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
+}
+
+static void insn_intercept_main()
+{
+	cur_insn = 0;
+	while(insn_table[cur_insn].name != NULL) {
+		set_stage(cur_insn);
+		if ((insn_table[cur_insn].type == INSN_CPU0
+			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
+			|| (insn_table[cur_insn].type == INSN_CPU1
+			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
+			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
+				insn_table[cur_insn].name);
+			continue;
+		}
+		insn_table[cur_insn].insn_func();
+		switch (insn_table[cur_insn].type) {
+		case INSN_CPU0:
+		case INSN_CPU1:
+		case INSN_ALWAYS_TRAP:
+			if (stage != cur_insn + 1)
+				report(insn_table[cur_insn].name, 0);
+			else
+				report(insn_table[cur_insn].name, 1);
+			break;
+		case INSN_NEVER_TRAP:
+			if (stage == cur_insn + 1)
+				report(insn_table[cur_insn].name, 0);
+			else
+				report(insn_table[cur_insn].name, 1);
+			break;
+		}
+		cur_insn ++;
+	}
+}
+
+static int insn_intercept_exit_handler()
+{
+	u64 guest_rip;
+	u32 reason;
+	ulong exit_qual;
+	u32 insn_len;
+	u32 insn_info;
+	bool pass;
+
+	guest_rip = vmcs_read(GUEST_RIP);
+	reason = vmcs_read(EXI_REASON) & 0xff;
+	exit_qual = vmcs_read(EXI_QUALIFICATION);
+	insn_len = vmcs_read(EXI_INST_LEN);
+	insn_info = vmcs_read(EXI_INST_INFO);
+	pass = (cur_insn == get_stage()) &&
+			insn_table[cur_insn].reason == reason;
+	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
+		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
+	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
+		pass = pass && insn_table[cur_insn].insn_info == insn_info;
+	if (pass)
+		set_stage(stage + 1);
+	vmcs_write(GUEST_RIP, guest_rip + insn_len);
+	return VMX_TEST_RESUME;
+}
+
 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs
    basic_* just implement some basic functions */
 struct vmx_test vmx_tests[] = {
@@ -645,5 +795,7 @@ struct vmx_test vmx_tests[] = {
 		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
 	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
 		basic_syscall_handler, {0} },
+	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
+		insn_intercept_exit_handler, basic_syscall_handler, {0} },
 	{ NULL, NULL, NULL, NULL, NULL, {0} },
 };
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases
  2013-08-15 11:45 [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
                   ` (3 preceding siblings ...)
  2013-08-15 11:45 ` [PATCH v2 4/4] kvm-unit-tests: VMX: Add test cases for instruction interception Arthur Chunqi Li
@ 2013-09-02  9:06 ` Arthur Chunqi Li
  2013-09-03 11:50   ` Gleb Natapov
  4 siblings, 1 reply; 11+ messages in thread
From: Arthur Chunqi Li @ 2013-09-02  9:06 UTC (permalink / raw)
  To: kvm; +Cc: Jan Kiszka, Gleb Natapov, Paolo Bonzini, Arthur Chunqi Li

Hi Gleb, Paolo and Jan,

Would you please review this patch series when you can spare time?
Jan has reviewed it and, of course, further suggestions are welcomed.

Arthur

On Thu, Aug 15, 2013 at 7:45 PM, Arthur Chunqi Li <yzt356@gmail.com> wrote:
> Add a series of test cases for nested VMX in kvm-unit-tests.
>
> Arthur Chunqi Li (4):
>   kvm-unit-tests: VMX: Add test cases for PAT and EFER
>   kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing
>   kvm-unit-tests: VMX: Add test cases for I/O bitmaps
>   kvm-unit-tests: VMX: Add test cases for instruction      interception
>
>  lib/x86/vm.h    |    4 +
>  x86/vmx.c       |    3 +-
>  x86/vmx.h       |   20 +-
>  x86/vmx_tests.c |  714 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 736 insertions(+), 5 deletions(-)
>
> --
> 1.7.9.5
>

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases
  2013-09-02  9:06 ` [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
@ 2013-09-03 11:50   ` Gleb Natapov
  2013-09-09  7:15     ` Jan Kiszka
  0 siblings, 1 reply; 11+ messages in thread
From: Gleb Natapov @ 2013-09-03 11:50 UTC (permalink / raw)
  To: Arthur Chunqi Li; +Cc: kvm, Jan Kiszka, Paolo Bonzini

On Mon, Sep 02, 2013 at 05:06:23PM +0800, Arthur Chunqi Li wrote:
> Hi Gleb, Paolo and Jan,
> 
Jan, have you reviewed it? Any ACKs?

> Would you please review this series of codes when you can spare time?
> Jan has review it and, of course, further suggestions are welcomed.
> 
> Arthur
> 
> On Thu, Aug 15, 2013 at 7:45 PM, Arthur Chunqi Li <yzt356@gmail.com> wrote:
> > Add a series of test cases for nested VMX in kvm-unit-tests.
> >
> > Arthur Chunqi Li (4):
> >   kvm-unit-tests: VMX: Add test cases for PAT and EFER
> >   kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing
> >   kvm-unit-tests: VMX: Add test cases for I/O bitmaps
> >   kvm-unit-tests: VMX: Add test cases for instruction      interception
> >
> >  lib/x86/vm.h    |    4 +
> >  x86/vmx.c       |    3 +-
> >  x86/vmx.h       |   20 +-
> >  x86/vmx_tests.c |  714 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  4 files changed, 736 insertions(+), 5 deletions(-)
> >
> > --
> > 1.7.9.5
> >

--
			Gleb.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases
  2013-09-03 11:50   ` Gleb Natapov
@ 2013-09-09  7:15     ` Jan Kiszka
  0 siblings, 0 replies; 11+ messages in thread
From: Jan Kiszka @ 2013-09-09  7:15 UTC (permalink / raw)
  To: Gleb Natapov; +Cc: Arthur Chunqi Li, kvm, Paolo Bonzini

[-- Attachment #1: Type: text/plain, Size: 1163 bytes --]

On 2013-09-03 13:50, Gleb Natapov wrote:
> On Mon, Sep 02, 2013 at 05:06:23PM +0800, Arthur Chunqi Li wrote:
>> Hi Gleb, Paolo and Jan,
>>
> Jan, have you reviewed it? Any ACKs?

I've only reviewed v1, but I scheduled v2 for tonight now.

Jan

> 
>> Would you please review this series of codes when you can spare time?
>> Jan has review it and, of course, further suggestions are welcomed.
>>
>> Arthur
>>
>> On Thu, Aug 15, 2013 at 7:45 PM, Arthur Chunqi Li <yzt356@gmail.com> wrote:
>>> Add a series of test cases for nested VMX in kvm-unit-tests.
>>>
>>> Arthur Chunqi Li (4):
>>>   kvm-unit-tests: VMX: Add test cases for PAT and EFER
>>>   kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing
>>>   kvm-unit-tests: VMX: Add test cases for I/O bitmaps
>>>   kvm-unit-tests: VMX: Add test cases for instruction      interception
>>>
>>>  lib/x86/vm.h    |    4 +
>>>  x86/vmx.c       |    3 +-
>>>  x86/vmx.h       |   20 +-
>>>  x86/vmx_tests.c |  714 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>  4 files changed, 736 insertions(+), 5 deletions(-)
>>>
>>> --
>>> 1.7.9.5
>>>
> 
> --
> 			Gleb.
> 



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 263 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/4] kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing
  2013-08-15 11:45 ` [PATCH v2 2/4] kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing Arthur Chunqi Li
@ 2013-09-09 12:36   ` Paolo Bonzini
  0 siblings, 0 replies; 11+ messages in thread
From: Paolo Bonzini @ 2013-09-09 12:36 UTC (permalink / raw)
  To: Arthur Chunqi Li; +Cc: kvm, jan.kiszka, gleb

Il 15/08/2013 13:45, Arthur Chunqi Li ha scritto:
> Add testing for CR0/4 shadowing. Two types of flags in CR0/4 are
> tested: flags owned and shadowed by L1. They are treated differently
> in KVM. We test one flag of both types in CR0 (TS and MP) and CR4
> (DE and TSD) with read through, read shadow, write through, write
> shadow (same as and different from shadowed value).
> 
> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
> ---
>  lib/x86/vm.h    |    4 +
>  x86/vmx_tests.c |  218 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 222 insertions(+)
> 
> diff --git a/lib/x86/vm.h b/lib/x86/vm.h
> index eff6f72..6e0ce2b 100644
> --- a/lib/x86/vm.h
> +++ b/lib/x86/vm.h
> @@ -17,9 +17,13 @@
>  #define PTE_ADDR    (0xffffffffff000ull)
>  
>  #define X86_CR0_PE      0x00000001
> +#define X86_CR0_MP      0x00000002
> +#define X86_CR0_TS      0x00000008
>  #define X86_CR0_WP      0x00010000
>  #define X86_CR0_PG      0x80000000
>  #define X86_CR4_VMXE   0x00000001
> +#define X86_CR4_TSD     0x00000004
> +#define X86_CR4_DE      0x00000008
>  #define X86_CR4_PSE     0x00000010
>  #define X86_CR4_PAE     0x00000020
>  #define X86_CR4_PCIDE  0x00020000
> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
> index 61b0cef..a5cc353 100644
> --- a/x86/vmx_tests.c
> +++ b/x86/vmx_tests.c
> @@ -5,12 +5,20 @@
>  
>  u64 ia32_pat;
>  u64 ia32_efer;
> +volatile u32 stage;
>  
>  static inline void vmcall()
>  {
>  	asm volatile("vmcall");
>  }
>  
> +static inline void set_stage(u32 s)
> +{
> +	barrier();
> +	stage = s;
> +	barrier();
> +}
> +
>  void basic_init()
>  {
>  }
> @@ -257,6 +265,214 @@ static int test_ctrl_efer_exit_handler()
>  	return VMX_TEST_VMEXIT;
>  }
>  
> +u32 guest_cr0, guest_cr4;
> +
> +static void cr_shadowing_main()
> +{
> +	u32 cr0, cr4, tmp;
> +
> +	// Test read through
> +	set_stage(0);
> +	guest_cr0 = read_cr0();
> +	if (stage == 1)
> +		report("Read through CR0", 0);
> +	else
> +		vmcall();
> +	set_stage(1);
> +	guest_cr4 = read_cr4();
> +	if (stage == 2)
> +		report("Read through CR4", 0);
> +	else
> +		vmcall();
> +	// Test write through
> +	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
> +	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
> +	set_stage(2);
> +	write_cr0(guest_cr0);
> +	if (stage == 3)
> +		report("Write throuth CR0", 0);
> +	else
> +		vmcall();
> +	set_stage(3);
> +	write_cr4(guest_cr4);
> +	if (stage == 4)
> +		report("Write through CR4", 0);
> +	else
> +		vmcall();
> +	// Test read shadow
> +	set_stage(4);
> +	vmcall();
> +	cr0 = read_cr0();
> +	if (stage != 5) {
> +		if (cr0 == guest_cr0)
> +			report("Read shadowing CR0", 1);
> +		else
> +			report("Read shadowing CR0", 0);
> +	}
> +	set_stage(5);
> +	cr4 = read_cr4();
> +	if (stage != 6) {
> +		if (cr4 == guest_cr4)
> +			report("Read shadowing CR4", 1);
> +		else
> +			report("Read shadowing CR4", 0);
> +	}
> +	// Test write shadow (same value with shadow)
> +	set_stage(6);
> +	write_cr0(guest_cr0);
> +	if (stage == 7)
> +		report("Write shadowing CR0 (same value with shadow)", 0);
> +	else
> +		vmcall();
> +	set_stage(7);
> +	write_cr4(guest_cr4);
> +	if (stage == 8)
> +		report("Write shadowing CR4 (same value with shadow)", 0);
> +	else
> +		vmcall();
> +	// Test write shadow (different value)
> +	set_stage(8);
> +	tmp = guest_cr0 ^ X86_CR0_TS;
> +	asm volatile("mov %0, %%rsi\n\t"
> +		"mov %%rsi, %%cr0\n\t"
> +		::"m"(tmp)
> +		:"rsi", "memory", "cc");
> +	if (stage != 9)
> +		report("Write shadowing different X86_CR0_TS", 0);
> +	else
> +		report("Write shadowing different X86_CR0_TS", 1);
> +	set_stage(9);
> +	tmp = guest_cr0 ^ X86_CR0_MP;
> +	asm volatile("mov %0, %%rsi\n\t"
> +		"mov %%rsi, %%cr0\n\t"
> +		::"m"(tmp)
> +		:"rsi", "memory", "cc");
> +	if (stage != 10)
> +		report("Write shadowing different X86_CR0_MP", 0);
> +	else
> +		report("Write shadowing different X86_CR0_MP", 1);
> +	set_stage(10);
> +	tmp = guest_cr4 ^ X86_CR4_TSD;
> +	asm volatile("mov %0, %%rsi\n\t"
> +		"mov %%rsi, %%cr4\n\t"
> +		::"m"(tmp)
> +		:"rsi", "memory", "cc");
> +	if (stage != 11)
> +		report("Write shadowing different X86_CR4_TSD", 0);
> +	else
> +		report("Write shadowing different X86_CR4_TSD", 1);
> +	set_stage(11);
> +	tmp = guest_cr4 ^ X86_CR4_DE;
> +	asm volatile("mov %0, %%rsi\n\t"
> +		"mov %%rsi, %%cr4\n\t"
> +		::"m"(tmp)
> +		:"rsi", "memory", "cc");
> +	if (stage != 12)
> +		report("Write shadowing different X86_CR4_DE", 0);
> +	else
> +		report("Write shadowing different X86_CR4_DE", 1);
> +}
> +
> +static int cr_shadowing_exit_handler()
> +{
> +	u64 guest_rip;
> +	ulong reason;
> +	u32 insn_len;
> +	u32 exit_qual;
> +
> +	guest_rip = vmcs_read(GUEST_RIP);
> +	reason = vmcs_read(EXI_REASON) & 0xff;
> +	insn_len = vmcs_read(EXI_INST_LEN);
> +	exit_qual = vmcs_read(EXI_QUALIFICATION);
> +	switch (reason) {
> +	case VMX_VMCALL:
> +		switch (stage) {
> +		case 0:
> +			if (guest_cr0 == vmcs_read(GUEST_CR0))
> +				report("Read through CR0", 1);
> +			else
> +				report("Read through CR0", 0);
> +			break;
> +		case 1:
> +			if (guest_cr4 == vmcs_read(GUEST_CR4))
> +				report("Read through CR4", 1);
> +			else
> +				report("Read through CR4", 0);
> +			break;
> +		case 2:
> +			if (guest_cr0 == vmcs_read(GUEST_CR0))
> +				report("Write through CR0", 1);
> +			else
> +				report("Write through CR0", 0);
> +			break;
> +		case 3:
> +			if (guest_cr4 == vmcs_read(GUEST_CR4))
> +				report("Write through CR4", 1);
> +			else
> +				report("Write through CR4", 0);
> +			break;
> +		case 4:
> +			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
> +			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
> +			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
> +			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
> +			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
> +			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
> +			break;
> +		case 6:
> +			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
> +				report("Write shadowing CR0 (same value)", 1);
> +			else
> +				report("Write shadowing CR0 (same value)", 0);
> +			break;
> +		case 7:
> +			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
> +				report("Write shadowing CR4 (same value)", 1);
> +			else
> +				report("Write shadowing CR4 (same value)", 0);
> +			break;
> +		}
> +		vmcs_write(GUEST_RIP, guest_rip + insn_len);
> +		return VMX_TEST_RESUME;
> +	case VMX_CR:
> +		switch (stage) {
> +		case 4:
> +			report("Read shadowing CR0", 0);
> +			set_stage(stage + 1);
> +			break;
> +		case 5:
> +			report("Read shadowing CR4", 0);
> +			set_stage(stage + 1);
> +			break;
> +		case 6:
> +			report("Write shadowing CR0 (same value)", 0);
> +			set_stage(stage + 1);
> +			break;
> +		case 7:
> +			report("Write shadowing CR4 (same value)", 0);
> +			set_stage(stage + 1);
> +			break;
> +		case 8:
> +		case 9:
> +			// 0x600 encodes "mov %esi, %cr0"
> +			if (exit_qual == 0x600)
> +				set_stage(stage + 1);
> +			break;
> +		case 10:
> +		case 11:
> +			// 0x604 encodes "mov %esi, %cr4"
> +			if (exit_qual == 0x604)
> +				set_stage(stage + 1);

Better put a "break;" here; I can do that when applying.

Otherwise the diff from v1 looks good.

Paolo

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/4] kvm-unit-tests: VMX: Add test cases for I/O bitmaps
  2013-08-15 11:45 ` [PATCH v2 3/4] kvm-unit-tests: VMX: Add test cases for I/O bitmaps Arthur Chunqi Li
@ 2013-09-09 12:39   ` Paolo Bonzini
  0 siblings, 0 replies; 11+ messages in thread
From: Paolo Bonzini @ 2013-09-09 12:39 UTC (permalink / raw)
  To: Arthur Chunqi Li; +Cc: kvm, jan.kiszka, gleb

Il 15/08/2013 13:45, Arthur Chunqi Li ha scritto:
> Add test cases for I/O bitmaps, including corner cases.
> 
> Test includes: pass & trap, in & out, different I/O width, low & high
> I/O bitmap, partial I/O pass, overrun (inl 0xFFFF).
> 
> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
> ---
>  x86/vmx.h       |    6 +--
>  x86/vmx_tests.c |  159 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 162 insertions(+), 3 deletions(-)
> 
> diff --git a/x86/vmx.h b/x86/vmx.h
> index 18961f1..dba8b20 100644
> --- a/x86/vmx.h
> +++ b/x86/vmx.h
> @@ -417,15 +417,15 @@ enum Ctrl1 {
>  	"popf\n\t"
>  
>  #define VMX_IO_SIZE_MASK		0x7
> -#define _VMX_IO_BYTE			1
> -#define _VMX_IO_WORD			2
> +#define _VMX_IO_BYTE			0
> +#define _VMX_IO_WORD			1
>  #define _VMX_IO_LONG			3
>  #define VMX_IO_DIRECTION_MASK		(1ul << 3)
>  #define VMX_IO_IN			(1ul << 3)
>  #define VMX_IO_OUT			0
>  #define VMX_IO_STRING			(1ul << 4)
>  #define VMX_IO_REP			(1ul << 5)
> -#define VMX_IO_OPRAND_DX		(1ul << 6)
> +#define VMX_IO_OPRAND_IMM		(1ul << 6)
>  #define VMX_IO_PORT_MASK		0xFFFF0000
>  #define VMX_IO_PORT_SHIFT		16
>  
> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
> index a5cc353..cd4dd99 100644
> --- a/x86/vmx_tests.c
> +++ b/x86/vmx_tests.c
> @@ -2,10 +2,13 @@
>  #include "msr.h"
>  #include "processor.h"
>  #include "vm.h"
> +#include "io.h"
>  
>  u64 ia32_pat;
>  u64 ia32_efer;
>  volatile u32 stage;
> +void *io_bitmap_a, *io_bitmap_b;
> +u16 ioport;
>  
>  static inline void vmcall()
>  {
> @@ -473,6 +476,160 @@ static int cr_shadowing_exit_handler()
>  	return VMX_TEST_VMEXIT;
>  }
>  
> +static void iobmp_init()
> +{
> +	u32 ctrl_cpu0;
> +
> +	io_bitmap_a = alloc_page();
> +	io_bitmap_a = alloc_page();
> +	memset(io_bitmap_a, 0x0, PAGE_SIZE);
> +	memset(io_bitmap_b, 0x0, PAGE_SIZE);
> +	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
> +	ctrl_cpu0 |= CPU_IO_BITMAP;
> +	ctrl_cpu0 &= (~CPU_IO);
> +	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
> +	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
> +	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
> +}
> +
> +static void iobmp_main()
> +{
> +	// stage 0, test IO pass
> +	set_stage(0);
> +	inb(0x5000);
> +	outb(0x0, 0x5000);
> +	if (stage != 0)
> +		report("I/O bitmap - I/O pass", 0);
> +	else
> +		report("I/O bitmap - I/O pass", 1);
> +	// test IO width, in/out
> +	((u8 *)io_bitmap_a)[0] = 0xFF;
> +	set_stage(2);
> +	inb(0x0);
> +	if (stage != 3)
> +		report("I/O bitmap - trap in", 0);
> +	else
> +		report("I/O bitmap - trap in", 1);
> +	set_stage(3);
> +	outw(0x0, 0x0);
> +	if (stage != 4)
> +		report("I/O bitmap - trap out", 0);
> +	else
> +		report("I/O bitmap - trap out", 1);
> +	set_stage(4);
> +	inl(0x0);
> +	if (stage != 5)
> +		report("I/O bitmap - I/O width, long", 0);
> +	// test low/high IO port
> +	set_stage(5);
> +	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
> +	inb(0x5000);
> +	if (stage == 6)
> +		report("I/O bitmap - I/O port, low part", 1);
> +	else
> +		report("I/O bitmap - I/O port, low part", 0);
> +	set_stage(6);
> +	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
> +	inb(0x9000);
> +	if (stage == 7)
> +		report("I/O bitmap - I/O port, high part", 1);
> +	else
> +		report("I/O bitmap - I/O port, high part", 0);
> +	// test partial pass
> +	set_stage(7);
> +	inl(0x4FFF);
> +	if (stage == 8)
> +		report("I/O bitmap - partial pass", 1);
> +	else
> +		report("I/O bitmap - partial pass", 0);
> +	// test overrun
> +	set_stage(8);
> +	memset(io_bitmap_a, 0x0, PAGE_SIZE);
> +	memset(io_bitmap_b, 0x0, PAGE_SIZE);
> +	inl(0xFFFF);
> +	if (stage == 9)
> +		report("I/O bitmap - overrun", 1);
> +	else
> +		report("I/O bitmap - overrun", 0);
> +	
> +	return;
> +}
> +
> +static int iobmp_exit_handler()
> +{
> +	u64 guest_rip;
> +	ulong reason, exit_qual;
> +	u32 insn_len;
> +
> +	guest_rip = vmcs_read(GUEST_RIP);
> +	reason = vmcs_read(EXI_REASON) & 0xff;
> +	exit_qual = vmcs_read(EXI_QUALIFICATION);
> +	insn_len = vmcs_read(EXI_INST_LEN);
> +	switch (reason) {
> +	case VMX_IO:
> +		switch (stage) {
> +		case 2:
> +			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
> +				report("I/O bitmap - I/O width, byte", 0);
> +			else
> +				report("I/O bitmap - I/O width, byte", 1);
> +			if (!(exit_qual & VMX_IO_IN))
> +				report("I/O bitmap - I/O direction, in", 0);
> +			else
> +				report("I/O bitmap - I/O direction, in", 1);
> +			set_stage(stage + 1);
> +			break;
> +		case 3:
> +			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
> +				report("I/O bitmap - I/O width, word", 0);
> +			else
> +				report("I/O bitmap - I/O width, word", 1);
> +			if (!(exit_qual & VMX_IO_IN))
> +				report("I/O bitmap - I/O direction, out", 1);
> +			else
> +				report("I/O bitmap - I/O direction, out", 0);
> +			set_stage(stage + 1);
> +			break;
> +		case 4:
> +			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
> +				report("I/O bitmap - I/O width, long", 0);
> +			else
> +				report("I/O bitmap - I/O width, long", 1);
> +			set_stage(stage + 1);
> +			break;
> +		case 5:
> +			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
> +				set_stage(stage + 1);
> +			break;
> +		case 6:
> +			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
> +				set_stage(stage + 1);
> +			break;
> +		case 7:
> +			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
> +				set_stage(stage + 1);
> +			break;
> +		case 8:
> +			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
> +				set_stage(stage + 1);
> +			break;
> +		case 0:
> +		case 1:
> +			set_stage(stage + 1);

Please keep these in order.

> +		default:
> +			// Should not reach here
> +			break;

These are not consistent with patch 2 where you have omitted these
defaults; if it is not reachable, you should report a failure.

Otherwise, the diff from v1 looks good.

Paolo

> +		}
> +		vmcs_write(GUEST_RIP, guest_rip + insn_len);
> +		return VMX_TEST_RESUME;
> +	default:
> +		printf("guest_rip = 0x%llx\n", guest_rip);
> +		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
> +		break;
> +	}
> +	return VMX_TEST_VMEXIT;
> +}
> +
>  /* name/init/guest_main/exit_handler/syscall_handler/guest_regs
>     basic_* just implement some basic functions */
>  struct vmx_test vmx_tests[] = {
> @@ -486,5 +643,7 @@ struct vmx_test vmx_tests[] = {
>  		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
>  	{ "CR shadowing", basic_init, cr_shadowing_main,
>  		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
> +	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
> +		basic_syscall_handler, {0} },
>  	{ NULL, NULL, NULL, NULL, NULL, {0} },
>  };
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 4/4] kvm-unit-tests: VMX: Add test cases for instruction interception
  2013-08-15 11:45 ` [PATCH v2 4/4] kvm-unit-tests: VMX: Add test cases for instruction interception Arthur Chunqi Li
@ 2013-09-09 12:42   ` Paolo Bonzini
  0 siblings, 0 replies; 11+ messages in thread
From: Paolo Bonzini @ 2013-09-09 12:42 UTC (permalink / raw)
  To: Arthur Chunqi Li; +Cc: kvm, jan.kiszka, gleb

Il 15/08/2013 13:45, Arthur Chunqi Li ha scritto:
> Add test cases for instruction interception, including four types:
> 1. Primary Processor-Based VM-Execution Controls (HLT/INVLPG/MWAIT/
> RDPMC/RDTSC/MONITOR/PAUSE)
> 2. Secondary Processor-Based VM-Execution Controls (WBINVD)
> 3. No control flag, always trap (CPUID/INVD)
> 4. Instructions always pass
> 
> Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
> ---
>  x86/vmx.c       |    3 +-
>  x86/vmx.h       |    7 +++
>  x86/vmx_tests.c |  152 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 160 insertions(+), 2 deletions(-)
> 
> diff --git a/x86/vmx.c b/x86/vmx.c
> index ca36d35..c346070 100644
> --- a/x86/vmx.c
> +++ b/x86/vmx.c
> @@ -336,8 +336,7 @@ static void init_vmx(void)
>  			: MSR_IA32_VMX_ENTRY_CTLS);
>  	ctrl_cpu_rev[0].val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PROC
>  			: MSR_IA32_VMX_PROCBASED_CTLS);
> -	if (ctrl_cpu_rev[0].set & CPU_SECONDARY)
> -		ctrl_cpu_rev[1].val = rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
> +	ctrl_cpu_rev[1].val = rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
>  	if (ctrl_cpu_rev[1].set & CPU_EPT || ctrl_cpu_rev[1].set & CPU_VPID)
>  		ept_vpid.val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
>  
> diff --git a/x86/vmx.h b/x86/vmx.h
> index dba8b20..2784ac6 100644
> --- a/x86/vmx.h
> +++ b/x86/vmx.h
> @@ -354,12 +354,17 @@ enum Ctrl0 {
>  	CPU_INTR_WINDOW		= 1ul << 2,
>  	CPU_HLT			= 1ul << 7,
>  	CPU_INVLPG		= 1ul << 9,
> +	CPU_MWAIT		= 1ul << 10,
> +	CPU_RDPMC		= 1ul << 11,
> +	CPU_RDTSC		= 1ul << 12,
>  	CPU_CR3_LOAD		= 1ul << 15,
>  	CPU_CR3_STORE		= 1ul << 16,
>  	CPU_TPR_SHADOW		= 1ul << 21,
>  	CPU_NMI_WINDOW		= 1ul << 22,
>  	CPU_IO			= 1ul << 24,
>  	CPU_IO_BITMAP		= 1ul << 25,
> +	CPU_MONITOR		= 1ul << 29,
> +	CPU_PAUSE		= 1ul << 30,

Please keep these sorted.

>  	CPU_MSR_BITMAP		= 1ul << 28,
>  	CPU_SECONDARY		= 1ul << 31,
>  };
> @@ -368,6 +373,8 @@ enum Ctrl1 {
>  	CPU_EPT			= 1ul << 1,
>  	CPU_VPID		= 1ul << 5,
>  	CPU_URG			= 1ul << 7,
> +	CPU_WBINVD		= 1ul << 6,
> +	CPU_RDRAND		= 1ul << 11,
>  };
>  
>  #define SAVE_GPR				\
> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
> index cd4dd99..be3e3b4 100644
> --- a/x86/vmx_tests.c
> +++ b/x86/vmx_tests.c
> @@ -22,6 +22,16 @@ static inline void set_stage(u32 s)
>  	barrier();
>  }
>  
> +static inline u32 get_stage()
> +{
> +	u32 s;
> +
> +	barrier();
> +	s = stage;
> +	barrier();
> +	return s;
> +}
> +
>  void basic_init()
>  {
>  }
> @@ -630,6 +640,146 @@ static int iobmp_exit_handler()
>  	return VMX_TEST_VMEXIT;
>  }
>  
> +#define INSN_CPU0		0
> +#define INSN_CPU1		1
> +#define INSN_ALWAYS_TRAP	2
> +#define INSN_NEVER_TRAP		3
> +
> +#define FIELD_EXIT_QUAL		0
> +#define FIELD_INSN_INFO		1
> +
> +asm(
> +	"insn_hlt: hlt;ret\n\t"
> +	"insn_invlpg: invlpg 0x12345678;ret\n\t"
> +	"insn_mwait: mwait;ret\n\t"
> +	"insn_rdpmc: rdpmc;ret\n\t"
> +	"insn_rdtsc: rdtsc;ret\n\t"
> +	"insn_monitor: monitor;ret\n\t"
> +	"insn_pause: pause;ret\n\t"
> +	"insn_wbinvd: wbinvd;ret\n\t"
> +	"insn_cpuid: cpuid;ret\n\t"
> +	"insn_invd: invd;ret\n\t"
> +);
> +extern void insn_hlt();
> +extern void insn_invlpg();
> +extern void insn_mwait();
> +extern void insn_rdpmc();
> +extern void insn_rdtsc();
> +extern void insn_monitor();
> +extern void insn_pause();
> +extern void insn_wbinvd();
> +extern void insn_cpuid();
> +extern void insn_invd();
> +
> +u32 cur_insn;
> +
> +struct insn_table {
> +	const char *name;
> +	u32 flag;
> +	void (*insn_func)();
> +	u32 type;
> +	u32 reason;
> +	ulong exit_qual;
> +	u32 insn_info;
> +	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to efines
> +	// which field need to be tested, reason is always tested
> +	u32 test_field;
> +};
> +
> +static struct insn_table insn_table[] = {
> +	// Flags for Primary Processor-Based VM-Execution Controls
> +	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
> +	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
> +		0x12345678, 0, FIELD_EXIT_QUAL},
> +	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
> +	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
> +	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
> +	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
> +	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
> +	// Flags for Secondary Processor-Based VM-Execution Controls
> +	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
> +	// Instructions always trap
> +	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
> +	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
> +	// Instructions never trap
> +	{NULL},
> +};
> +
> +static void insn_intercept_init()
> +{
> +	u32 ctrl_cpu[2];
> +
> +	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
> +	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
> +		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
> +	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
> +	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
> +	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
> +	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
> +	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
> +	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
> +}
> +
> +static void insn_intercept_main()
> +{
> +	cur_insn = 0;
> +	while(insn_table[cur_insn].name != NULL) {
> +		set_stage(cur_insn);
> +		if ((insn_table[cur_insn].type == INSN_CPU0
> +			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
> +			|| (insn_table[cur_insn].type == INSN_CPU1
> +			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
> +			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
> +				insn_table[cur_insn].name);
> +			continue;
> +		}
> +		insn_table[cur_insn].insn_func();
> +		switch (insn_table[cur_insn].type) {
> +		case INSN_CPU0:
> +		case INSN_CPU1:
> +		case INSN_ALWAYS_TRAP:
> +			if (stage != cur_insn + 1)
> +				report(insn_table[cur_insn].name, 0);
> +			else
> +				report(insn_table[cur_insn].name, 1);
> +			break;
> +		case INSN_NEVER_TRAP:
> +			if (stage == cur_insn + 1)
> +				report(insn_table[cur_insn].name, 0);
> +			else
> +				report(insn_table[cur_insn].name, 1);
> +			break;
> +		}
> +		cur_insn ++;

No space before ++.

Otherwise looks good.

Paolo

> +	}
> +}
> +
> +static int insn_intercept_exit_handler()
> +{
> +	u64 guest_rip;
> +	u32 reason;
> +	ulong exit_qual;
> +	u32 insn_len;
> +	u32 insn_info;
> +	bool pass;
> +
> +	guest_rip = vmcs_read(GUEST_RIP);
> +	reason = vmcs_read(EXI_REASON) & 0xff;
> +	exit_qual = vmcs_read(EXI_QUALIFICATION);
> +	insn_len = vmcs_read(EXI_INST_LEN);
> +	insn_info = vmcs_read(EXI_INST_INFO);
> +	pass = (cur_insn == get_stage()) &&
> +			insn_table[cur_insn].reason == reason;
> +	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
> +		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
> +	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
> +		pass = pass && insn_table[cur_insn].insn_info == insn_info;
> +	if (pass)
> +		set_stage(stage + 1);
> +	vmcs_write(GUEST_RIP, guest_rip + insn_len);
> +	return VMX_TEST_RESUME;
> +}
> +
>  /* name/init/guest_main/exit_handler/syscall_handler/guest_regs
>     basic_* just implement some basic functions */
>  struct vmx_test vmx_tests[] = {
> @@ -645,5 +795,7 @@ struct vmx_test vmx_tests[] = {
>  		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
>  	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
>  		basic_syscall_handler, {0} },
> +	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
> +		insn_intercept_exit_handler, basic_syscall_handler, {0} },
>  	{ NULL, NULL, NULL, NULL, NULL, {0} },
>  };
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2013-09-09 12:42 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-08-15 11:45 [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
2013-08-15 11:45 ` [PATCH v2 1/4] kvm-unit-tests: VMX: Add test cases for PAT and EFER Arthur Chunqi Li
2013-08-15 11:45 ` [PATCH v2 2/4] kvm-unit-tests: VMX: Add test cases for CR0/4 shadowing Arthur Chunqi Li
2013-09-09 12:36   ` Paolo Bonzini
2013-08-15 11:45 ` [PATCH v2 3/4] kvm-unit-tests: VMX: Add test cases for I/O bitmaps Arthur Chunqi Li
2013-09-09 12:39   ` Paolo Bonzini
2013-08-15 11:45 ` [PATCH v2 4/4] kvm-unit-tests: VMX: Add test cases for instruction interception Arthur Chunqi Li
2013-09-09 12:42   ` Paolo Bonzini
2013-09-02  9:06 ` [PATCH v2 0/4] kvm-unit-tests: Add a series of test cases Arthur Chunqi Li
2013-09-03 11:50   ` Gleb Natapov
2013-09-09  7:15     ` Jan Kiszka

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).