* [kvm-unit-tests PATCH v6 1/2] x86: nvmx: fix bug in __enter_guest()
@ 2019-09-20 22:29 Marc Orr
2019-09-20 22:29 ` [kvm-unit-tests PATCH v6 2/2] x86: nvmx: test max atomic switch MSRs Marc Orr
From: Marc Orr @ 2019-09-20 22:29 UTC
To: kvm, jmattson, pshier, sean.j.christopherson, krish.sadhukhan,
pbonzini, rkrcmar
Cc: Marc Orr
__enter_guest() should only set the launched flag when a launch has
succeeded. Thus, don't set the launched flag when VMX_ENTRY_FAILURE
(bit 31) is set in the VMCS exit reason.
Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Marc Orr <marcorr@google.com>
---
v5 -> v6
* No changes.
x86/vmx.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/x86/vmx.c b/x86/vmx.c
index 6079420db33a..7313c78f15c2 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -1820,7 +1820,7 @@ static void __enter_guest(u8 abort_flag, struct vmentry_failure *failure)
abort();
}
- if (!failure->early) {
+ if (!failure->early && !(vmcs_read(EXI_REASON) & VMX_ENTRY_FAILURE)) {
launched = 1;
check_for_guest_termination();
}
--
2.23.0.351.gc4317032e6-goog
* [kvm-unit-tests PATCH v6 2/2] x86: nvmx: test max atomic switch MSRs
2019-09-20 22:29 [kvm-unit-tests PATCH v6 1/2] x86: nvmx: fix bug in __enter_guest() Marc Orr
@ 2019-09-20 22:29 ` Marc Orr
2019-09-23 20:08 ` Sean Christopherson
From: Marc Orr @ 2019-09-20 22:29 UTC
To: kvm, jmattson, pshier, sean.j.christopherson, krish.sadhukhan,
pbonzini, rkrcmar
Cc: Marc Orr
Exercise nested VMX's atomic MSR switch code (e.g., the VM-entry
MSR-load list) at the maximum number of MSRs supported, as described in
the SDM appendix chapter titled "MISCELLANEOUS DATA".
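For reference, a sketch of the arithmetic behind that limit (assuming
the maximum IA32_VMX_MISC[27:25] encoding of 111B; real hardware may
report a smaller value):

	unsigned int max_msrs  = (7 + 1) * 512;        /* recommended max: 4096 MSRs per list */
	unsigned int entry_len = 4 + 4 + 8;            /* 4-byte index + 4 reserved + 8-byte data */
	unsigned int list_size = max_msrs * entry_len; /* 65536 bytes = 64 KiB per list */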
Suggested-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Marc Orr <marcorr@google.com>
---
v5 -> v6
* Replaced atomic_switch_msr_limit_test_guest() (which was supposed to
vmcall() in v5!) w/ v2_null_test_guest() and updated exit handling
logic accordingly.
lib/alloc_page.c | 5 ++
lib/alloc_page.h | 1 +
x86/unittests.cfg | 2 +-
x86/vmx_tests.c | 123 ++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 130 insertions(+), 1 deletion(-)
diff --git a/lib/alloc_page.c b/lib/alloc_page.c
index 97d13395ff08..ed236389537e 100644
--- a/lib/alloc_page.c
+++ b/lib/alloc_page.c
@@ -53,6 +53,11 @@ void free_pages(void *mem, unsigned long size)
spin_unlock(&lock);
}
+void free_pages_by_order(void *mem, unsigned long order)
+{
+ free_pages(mem, 1ul << (order + PAGE_SHIFT));
+}
+
void *alloc_page()
{
void *p;
diff --git a/lib/alloc_page.h b/lib/alloc_page.h
index 5cdfec57a0a8..739a91def979 100644
--- a/lib/alloc_page.h
+++ b/lib/alloc_page.h
@@ -14,5 +14,6 @@ void *alloc_page(void);
void *alloc_pages(unsigned long order);
void free_page(void *page);
void free_pages(void *mem, unsigned long size);
+void free_pages_by_order(void *mem, unsigned long order);
#endif
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 694ee3d42f3a..05122cf91ea1 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -227,7 +227,7 @@ extra_params = -cpu qemu64,+umip
[vmx]
file = vmx.flat
-extra_params = -cpu host,+vmx -append "-exit_monitor_from_l2_test -ept_access* -vmx_smp* -vmx_vmcs_shadow_test"
+extra_params = -cpu host,+vmx -append "-exit_monitor_from_l2_test -ept_access* -vmx_smp* -vmx_vmcs_shadow_test -atomic_switch_overflow_msrs_test"
arch = x86_64
groups = vmx
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index f035f24a771a..f8ee0f0f8f2b 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -8570,6 +8570,126 @@ static int invalid_msr_entry_failure(struct vmentry_failure *failure)
return VMX_TEST_VMEXIT;
}
+/*
+ * The max number of MSRs in an atomic switch MSR list is:
+ * (111B + 1) * 512 = 4096
+ *
+ * Each list entry consumes:
+ * 4-byte MSR index + 4 bytes reserved + 8-byte data = 16 bytes
+ *
+ * Allocate 128 kB to cover max_msr_list_size (i.e., 64 kB) and then some.
+ */
+static const u32 msr_list_page_order = 5;
+
+static void populate_msr_list(struct vmx_msr_entry *msr_list,
+ size_t byte_capacity, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ msr_list[i].index = MSR_IA32_TSC;
+ msr_list[i].reserved = 0;
+ msr_list[i].value = 0x1234567890abcdef;
+ }
+
+ memset(msr_list + count, 0xff,
+ byte_capacity - count * sizeof(*msr_list));
+}
+
+static int max_msr_list_size(void)
+{
+ u32 vmx_misc = rdmsr(MSR_IA32_VMX_MISC);
+ u32 factor = ((vmx_misc & GENMASK(27, 25)) >> 25) + 1;
+
+ return factor * 512;
+}
+
+static void atomic_switch_msrs_test(int count)
+{
+ int max_allowed = max_msr_list_size();
+ int byte_capacity = 1ul << (msr_list_page_order + PAGE_SHIFT);
+ /* KVM signals VM-Abort if an exit MSR list exceeds the max size. */
+ int exit_count = MIN(count, max_allowed);
+
+ /*
+ * Check for the IA32_TSC MSR,
+ * available with the "TSC flag" and used to populate the MSR lists.
+ */
+ if (!(cpuid(1).d & (1 << 4))) {
+ report_skip(__func__);
+ return;
+ }
+
+ /* Set L2 guest. */
+ test_set_guest(v2_null_test_guest);
+
+ /* Setup atomic MSR switch lists. */
+ entry_msr_load = alloc_pages(msr_list_page_order);
+ exit_msr_load = alloc_pages(msr_list_page_order);
+ exit_msr_store = alloc_pages(msr_list_page_order);
+
+ vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load);
+ vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load);
+ vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store);
+
+ /*
+ * VM-Enter should succeed up to the max number of MSRs per list, and
+ * should not consume junk beyond the last entry.
+ */
+ populate_msr_list(entry_msr_load, byte_capacity, count);
+ populate_msr_list(exit_msr_load, byte_capacity, exit_count);
+ populate_msr_list(exit_msr_store, byte_capacity, exit_count);
+
+ vmcs_write(ENT_MSR_LD_CNT, count);
+ vmcs_write(EXI_MSR_LD_CNT, exit_count);
+ vmcs_write(EXI_MSR_ST_CNT, exit_count);
+
+ if (count <= max_allowed) {
+ /*
+ * enter_guest() verifies that VM-enter succeeds. After the
+ * test completes, the test harness (see test_run() in vmx.c)
+ * verifies that the VM-enter completes by reaching the end of
+ * v2_null_test_guest().
+ */
+ enter_guest();
+ } else {
+ u32 exit_reason;
+ u32 exit_reason_want;
+ u32 exit_qual;
+
+ enter_guest_with_invalid_guest_state();
+
+ exit_reason = vmcs_read(EXI_REASON);
+ exit_reason_want = VMX_FAIL_MSR | VMX_ENTRY_FAILURE;
+ report("exit_reason, %u, is %u.",
+ exit_reason == exit_reason_want, exit_reason,
+ exit_reason_want);
+
+ exit_qual = vmcs_read(EXI_QUALIFICATION);
+ report("exit_qual, %u, is %u.", exit_qual == max_allowed + 1,
+ exit_qual, max_allowed + 1);
+
+ /* Enter the guest (with valid counts) to set guest_finished. */
+ vmcs_write(ENT_MSR_LD_CNT, 0);
+ vmcs_write(EXI_MSR_LD_CNT, 0);
+ vmcs_write(EXI_MSR_ST_CNT, 0);
+ enter_guest();
+ }
+
+ free_pages_by_order(entry_msr_load, msr_list_page_order);
+ free_pages_by_order(exit_msr_load, msr_list_page_order);
+ free_pages_by_order(exit_msr_store, msr_list_page_order);
+}
+
+static void atomic_switch_max_msrs_test(void)
+{
+ atomic_switch_msrs_test(max_msr_list_size());
+}
+
+static void atomic_switch_overflow_msrs_test(void)
+{
+ atomic_switch_msrs_test(max_msr_list_size() + 1);
+}
#define TEST(name) { #name, .v2 = name }
@@ -8660,5 +8780,8 @@ struct vmx_test vmx_tests[] = {
TEST(ept_access_test_paddr_read_execute_ad_enabled),
TEST(ept_access_test_paddr_not_present_page_fault),
TEST(ept_access_test_force_2m_page),
+ /* Atomic MSR switch tests. */
+ TEST(atomic_switch_max_msrs_test),
+ TEST(atomic_switch_overflow_msrs_test),
{ NULL, NULL, NULL, NULL, NULL, {0} },
};
--
2.23.0.351.gc4317032e6-goog
* Re: [kvm-unit-tests PATCH v6 2/2] x86: nvmx: test max atomic switch MSRs
2019-09-20 22:29 ` [kvm-unit-tests PATCH v6 2/2] x86: nvmx: test max atomic switch MSRs Marc Orr
@ 2019-09-23 20:08 ` Sean Christopherson
2019-09-25 1:18 ` Marc Orr
From: Sean Christopherson @ 2019-09-23 20:08 UTC
To: Marc Orr; +Cc: kvm, jmattson, pshier, krish.sadhukhan, pbonzini, rkrcmar
On Fri, Sep 20, 2019 at 03:29:45PM -0700, Marc Orr wrote:
> + u32 exit_reason;
> + u32 exit_reason_want;
> + u32 exit_qual;
> +
> + enter_guest_with_invalid_guest_state();
> +
> + exit_reason = vmcs_read(EXI_REASON);
> + exit_reason_want = VMX_FAIL_MSR | VMX_ENTRY_FAILURE;
> + report("exit_reason, %u, is %u.",
> + exit_reason == exit_reason_want, exit_reason,
> + exit_reason_want);
> +
> + exit_qual = vmcs_read(EXI_QUALIFICATION);
> + report("exit_qual, %u, is %u.", exit_qual == max_allowed + 1,
> + exit_qual, max_allowed + 1);
I'd stick with the more standard "val %u, expected %u" verbiage. E.g.:
exit_reason, 34, is 35
versus
exit_reason 34, expected 35
The "is" part makes it sound like the value in the VMCS *is* 35.
* Re: [kvm-unit-tests PATCH v6 2/2] x86: nvmx: test max atomic switch MSRs
2019-09-23 20:08 ` Sean Christopherson
@ 2019-09-25 1:18 ` Marc Orr
From: Marc Orr @ 2019-09-25 1:18 UTC
To: Sean Christopherson
Cc: kvm, Jim Mattson, Peter Shier, Krish Sadhukhan, Paolo Bonzini,
Radim Krčmář
On Mon, Sep 23, 2019 at 1:08 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Fri, Sep 20, 2019 at 03:29:45PM -0700, Marc Orr wrote:
> > + u32 exit_reason;
> > + u32 exit_reason_want;
> > + u32 exit_qual;
> > +
> > + enter_guest_with_invalid_guest_state();
> > +
> > + exit_reason = vmcs_read(EXI_REASON);
> > + exit_reason_want = VMX_FAIL_MSR | VMX_ENTRY_FAILURE;
> > + report("exit_reason, %u, is %u.",
> > + exit_reason == exit_reason_want, exit_reason,
> > + exit_reason_want);
> > +
> > + exit_qual = vmcs_read(EXI_QUALIFICATION);
> > + report("exit_qual, %u, is %u.", exit_qual == max_allowed + 1,
> > + exit_qual, max_allowed + 1);
>
> I'd stick with the more standard "val %u, expected %u" verbiage. E.g.:
>
> exit_reason, 34, is 35
>
> versus
>
> exit_reason 34, expected 35
>
> The "is" part makes it sound like the value in the VMCS *is* 35.
Done.