public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Jon Kohler <jon@nutanix.com>
To: seanjc@google.com, pbonzini@redhat.com, kvm@vger.kernel.org,
	Jon Kohler <jon@nutanix.com>
Subject: [kvm-unit-tests PATCH 10/17] x86/vmx: switch to new vmx.h primary processor-based VM-execution controls
Date: Tue, 16 Sep 2025 10:22:39 -0700	[thread overview]
Message-ID: <20250916172247.610021-11-jon@nutanix.com> (raw)
In-Reply-To: <20250916172247.610021-1-jon@nutanix.com>

Migrate to new vmx.h's primary processor-based VM-execution controls,
which makes it easier to grok from one code base to another.

Save secondary execution controls bit 31 for the next patch.

No functional change intended.

Signed-off-by: Jon Kohler <jon@nutanix.com>

---
 lib/linux/vmx.h |   1 +
 x86/vmx.c       |   2 +-
 x86/vmx.h       |  19 ------
 x86/vmx_tests.c | 157 ++++++++++++++++++++++++++----------------------
 4 files changed, 87 insertions(+), 92 deletions(-)

diff --git a/lib/linux/vmx.h b/lib/linux/vmx.h
index 5973bd86..f3c2aacc 100644
--- a/lib/linux/vmx.h
+++ b/lib/linux/vmx.h
@@ -16,6 +16,7 @@
 #include "libcflat.h"
 #include "trapnr.h"
 #include "util.h"
+#include "vmxfeatures.h"
 
 #define VMCS_CONTROL_BIT(x)	BIT(VMX_FEATURE_##x & 0x1f)
 
diff --git a/x86/vmx.c b/x86/vmx.c
index df9a23c7..c1845cea 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -1258,7 +1258,7 @@ int init_vmcs(struct vmcs **vmcs)
 	ctrl_exit = EXI_LOAD_EFER | EXI_HOST_64 | EXI_LOAD_PAT;
 	ctrl_enter = (ENT_LOAD_EFER | ENT_GUEST_64);
 	/* DIsable IO instruction VMEXIT now */
-	ctrl_cpu[0] &= (~(CPU_IO | CPU_IO_BITMAP));
+	ctrl_cpu[0] &= (~(CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_USE_IO_BITMAPS));
 	ctrl_cpu[1] = 0;
 
 	ctrl_pin = (ctrl_pin | ctrl_pin_rev.set) & ctrl_pin_rev.clr;
diff --git a/x86/vmx.h b/x86/vmx.h
index 4d13ad91..a83d08b8 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -436,25 +436,6 @@ enum Ctrl_pin {
 };
 
 enum Ctrl0 {
-	CPU_INTR_WINDOW		= 1ul << 2,
-	CPU_USE_TSC_OFFSET	= 1ul << 3,
-	CPU_HLT			= 1ul << 7,
-	CPU_INVLPG		= 1ul << 9,
-	CPU_MWAIT		= 1ul << 10,
-	CPU_RDPMC		= 1ul << 11,
-	CPU_RDTSC		= 1ul << 12,
-	CPU_CR3_LOAD		= 1ul << 15,
-	CPU_CR3_STORE		= 1ul << 16,
-	CPU_CR8_LOAD		= 1ul << 19,
-	CPU_CR8_STORE		= 1ul << 20,
-	CPU_TPR_SHADOW		= 1ul << 21,
-	CPU_NMI_WINDOW		= 1ul << 22,
-	CPU_IO			= 1ul << 24,
-	CPU_IO_BITMAP		= 1ul << 25,
-	CPU_MTF			= 1ul << 27,
-	CPU_MSR_BITMAP		= 1ul << 28,
-	CPU_MONITOR		= 1ul << 29,
-	CPU_PAUSE		= 1ul << 30,
 	CPU_SECONDARY		= 1ul << 31,
 };
 
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 5ca4b79b..55d151a4 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -266,7 +266,7 @@ static void msr_bmp_init(void)
 
 	msr_bitmap = alloc_page();
 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
-	ctrl_cpu0 |= CPU_MSR_BITMAP;
+	ctrl_cpu0 |= CPU_BASED_USE_MSR_BITMAPS;
 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
 }
@@ -275,13 +275,13 @@ static void *get_msr_bitmap(void)
 {
 	void *msr_bitmap;
 
-	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_MSR_BITMAP) {
+	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_BASED_USE_MSR_BITMAPS) {
 		msr_bitmap = (void *)vmcs_read(MSR_BITMAP);
 	} else {
 		msr_bitmap = alloc_page();
 		memset(msr_bitmap, 0xff, PAGE_SIZE);
 		vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
-		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_MSR_BITMAP);
+		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_BASED_USE_MSR_BITMAPS);
 	}
 
 	return msr_bitmap;
@@ -643,8 +643,8 @@ static int iobmp_init(struct vmcs *vmcs)
 	io_bitmap_a = alloc_page();
 	io_bitmap_b = alloc_page();
 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
-	ctrl_cpu0 |= CPU_IO_BITMAP;
-	ctrl_cpu0 &= (~CPU_IO);
+	ctrl_cpu0 |= CPU_BASED_USE_IO_BITMAPS;
+	ctrl_cpu0 &= (~CPU_BASED_UNCOND_IO_EXITING);
 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
@@ -754,7 +754,8 @@ static int iobmp_exit_handler(union exit_reason exit_reason)
 		case 9:
 		case 10:
 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
-			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
+			vmcs_write(CPU_EXEC_CTRL0,
+				   ctrl_cpu0 & ~CPU_BASED_UNCOND_IO_EXITING);
 			vmx_inc_test_stage();
 			break;
 		default:
@@ -770,12 +771,14 @@ static int iobmp_exit_handler(union exit_reason exit_reason)
 		switch (vmx_get_test_stage()) {
 		case 9:
 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
-			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
+			ctrl_cpu0 |= CPU_BASED_UNCOND_IO_EXITING |
+				     CPU_BASED_USE_IO_BITMAPS;
 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
 			break;
 		case 10:
 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
-			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
+			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_BASED_USE_IO_BITMAPS) |
+						 CPU_BASED_UNCOND_IO_EXITING;
 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
 			break;
 		default:
@@ -886,22 +889,25 @@ struct insn_table {
  */
 static struct insn_table insn_table[] = {
 	// Flags for Primary Processor-Based VM-Execution Controls
-	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
-	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
+	{"HLT",  CPU_BASED_HLT_EXITING, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
+	{"INVLPG", CPU_BASED_INVLPG_EXITING, insn_invlpg, INSN_CPU0, 14,
 		0x12345678, 0, FIELD_EXIT_QUAL},
-	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0, this_cpu_has_mwait},
-	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0, this_cpu_has_pmu},
-	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
-	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
-		FIELD_EXIT_QUAL},
-	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
-		FIELD_EXIT_QUAL},
-	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
-		FIELD_EXIT_QUAL},
-	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
-		FIELD_EXIT_QUAL},
-	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0, this_cpu_has_mwait},
-	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
+	{"MWAIT", CPU_BASED_MWAIT_EXITING, insn_mwait, INSN_CPU0, 36, 0, 0, 0,
+		this_cpu_has_mwait},
+	{"RDPMC", CPU_BASED_RDPMC_EXITING, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0,
+		this_cpu_has_pmu},
+	{"RDTSC", CPU_BASED_RDTSC_EXITING, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
+	{"CR3 load", CPU_BASED_CR3_LOAD_EXITING, insn_cr3_load, INSN_CPU0, 28,
+		0x3, 0, FIELD_EXIT_QUAL},
+	{"CR3 store", CPU_BASED_CR3_STORE_EXITING, insn_cr3_store, INSN_CPU0,
+		28, 0x13, 0, FIELD_EXIT_QUAL},
+	{"CR8 load", CPU_BASED_CR8_LOAD_EXITING, insn_cr8_load, INSN_CPU0, 28,
+		0x8, 0,	FIELD_EXIT_QUAL},
+	{"CR8 store", CPU_BASED_CR8_STORE_EXITING, insn_cr8_store, INSN_CPU0,
+		28, 0x18, 0, FIELD_EXIT_QUAL},
+	{"MONITOR", CPU_BASED_MONITOR_EXITING, insn_monitor, INSN_CPU0, 39,
+		0, 0, 0, this_cpu_has_mwait},
+	{"PAUSE", CPU_BASED_PAUSE_EXITING, insn_pause, INSN_CPU0, 40, 0, 0, 0},
 	// Flags for Secondary Processor-Based VM-Execution Controls
 	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
 	{"DESC_TABLE (SGDT)", CPU_DESC_TABLE, insn_sgdt, INSN_CPU1, 46, 0, 0, 0},
@@ -3814,10 +3820,10 @@ static void test_vmcs_addr_reference(u32 control_bit, enum Encoding field,
  */
 static void test_io_bitmaps(void)
 {
-	test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_A,
+	test_vmcs_addr_reference(CPU_BASED_USE_IO_BITMAPS, IO_BITMAP_A,
 				 "I/O bitmap A", "Use I/O bitmaps",
 				 PAGE_SIZE, false, true);
-	test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_B,
+	test_vmcs_addr_reference(CPU_BASED_USE_IO_BITMAPS, IO_BITMAP_B,
 				 "I/O bitmap B", "Use I/O bitmaps",
 				 PAGE_SIZE, false, true);
 }
@@ -3830,7 +3836,7 @@ static void test_io_bitmaps(void)
  */
 static void test_msr_bitmap(void)
 {
-	test_vmcs_addr_reference(CPU_MSR_BITMAP, MSR_BITMAP,
+	test_vmcs_addr_reference(CPU_BASED_USE_MSR_BITMAPS, MSR_BITMAP,
 				 "MSR bitmap", "Use MSR bitmaps",
 				 PAGE_SIZE, false, true);
 }
@@ -3851,8 +3857,9 @@ static void test_apic_virt_addr(void)
 	 * what we're trying to achieve and fails vmentry.
 	 */
 	u32 cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0);
-	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0 | CPU_CR8_LOAD | CPU_CR8_STORE);
-	test_vmcs_addr_reference(CPU_TPR_SHADOW, APIC_VIRT_ADDR,
+	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0 | CPU_BASED_CR8_LOAD_EXITING |
+		   CPU_BASED_CR8_STORE_EXITING);
+	test_vmcs_addr_reference(CPU_BASED_TPR_SHADOW, APIC_VIRT_ADDR,
 				 "virtual-APIC address", "Use TPR shadow",
 				 PAGE_SIZE, false, true);
 	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0);
@@ -3924,18 +3931,18 @@ static void test_apic_virtual_ctls(void)
 	/*
 	 * First test
 	 */
-	if (!((ctrl_cpu_rev[0].clr & (CPU_SECONDARY | CPU_TPR_SHADOW)) ==
-	    (CPU_SECONDARY | CPU_TPR_SHADOW)))
+	if (!((ctrl_cpu_rev[0].clr & (CPU_SECONDARY | CPU_BASED_TPR_SHADOW)) ==
+	    (CPU_SECONDARY | CPU_BASED_TPR_SHADOW)))
 		return;
 
 	primary |= CPU_SECONDARY;
-	primary &= ~CPU_TPR_SHADOW;
+	primary &= ~CPU_BASED_TPR_SHADOW;
 	vmcs_write(CPU_EXEC_CTRL0, primary);
 
 	while (1) {
 		for (j = 1; j < 8; j++) {
 			secondary &= ~(CPU_VIRT_X2APIC | CPU_APIC_REG_VIRT | CPU_VINTD);
-			if (primary & CPU_TPR_SHADOW) {
+			if (primary & CPU_BASED_TPR_SHADOW) {
 				is_ctrl_valid = true;
 			} else {
 				if (! set_bit_pattern(j, &secondary))
@@ -3958,7 +3965,7 @@ static void test_apic_virtual_ctls(void)
 			break;
 		i++;
 
-		primary |= CPU_TPR_SHADOW;
+		primary |= CPU_BASED_TPR_SHADOW;
 		vmcs_write(CPU_EXEC_CTRL0, primary);
 		strcpy(str, "enabled");
 	}
@@ -4017,7 +4024,8 @@ static void test_virtual_intr_ctls(void)
 	    (ctrl_pin_rev.clr & PIN_EXTINT)))
 		return;
 
-	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW);
+	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY |
+		   CPU_BASED_TPR_SHADOW);
 	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VINTD);
 	vmcs_write(PIN_CONTROLS, pin & ~PIN_EXTINT);
 	report_prefix_pushf("Virtualize interrupt-delivery disabled; external-interrupt exiting disabled");
@@ -4086,7 +4094,8 @@ static void test_posted_intr(void)
 	    (ctrl_exit_rev.clr & EXI_INTA)))
 		return;
 
-	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW);
+	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY |
+		   CPU_BASED_TPR_SHADOW);
 
 	/*
 	 * Test virtual-interrupt-delivery and acknowledge-interrupt-on-exit
@@ -4237,7 +4246,7 @@ static void try_tpr_threshold_and_vtpr(unsigned threshold, unsigned vtpr)
 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
 
-	if ((primary & CPU_TPR_SHADOW) &&
+	if ((primary & CPU_BASED_TPR_SHADOW) &&
 	    (!(primary & CPU_SECONDARY) ||
 	     !(secondary & (CPU_VINTD | CPU_VIRT_APIC_ACCESSES))))
 		valid = (threshold & 0xf) <= ((vtpr >> 4) & 0xf);
@@ -4571,7 +4580,7 @@ static void try_tpr_threshold(unsigned threshold)
 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
 
-	if ((primary & CPU_TPR_SHADOW) && !((primary & CPU_SECONDARY) &&
+	if ((primary & CPU_BASED_TPR_SHADOW) && !((primary & CPU_SECONDARY) &&
 	    (secondary & CPU_VINTD)))
 		valid = !(threshold >> 4);
 
@@ -4627,18 +4636,20 @@ static void test_tpr_threshold(void)
 	u64 threshold = vmcs_read(TPR_THRESHOLD);
 	void *virtual_apic_page;
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW))
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_TPR_SHADOW))
 		return;
 
 	virtual_apic_page = alloc_page();
 	memset(virtual_apic_page, 0xff, PAGE_SIZE);
 	vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page));
 
-	vmcs_write(CPU_EXEC_CTRL0, primary & ~(CPU_TPR_SHADOW | CPU_SECONDARY));
+	vmcs_write(CPU_EXEC_CTRL0, primary & ~(CPU_BASED_TPR_SHADOW |
+		   CPU_SECONDARY));
 	report_prefix_pushf("Use TPR shadow disabled, secondary controls disabled");
 	test_tpr_threshold_values();
 	report_prefix_pop();
-	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_TPR_SHADOW);
+	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) |
+		   CPU_BASED_TPR_SHADOW);
 	report_prefix_pushf("Use TPR shadow enabled, secondary controls disabled");
 	test_tpr_threshold_values();
 	report_prefix_pop();
@@ -4727,7 +4738,7 @@ static void test_nmi_ctrls(void)
 	cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0);
 
 	test_pin_ctrls = pin_ctrls & ~(PIN_NMI | PIN_VIRT_NMI);
-	test_cpu_ctrls0 = cpu_ctrls0 & ~CPU_NMI_WINDOW;
+	test_cpu_ctrls0 = cpu_ctrls0 & ~CPU_BASED_NMI_WINDOW_EXITING;
 
 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
 	report_prefix_pushf("NMI-exiting disabled, virtual-NMIs disabled");
@@ -4749,13 +4760,14 @@ static void test_nmi_ctrls(void)
 	test_vmx_valid_controls();
 	report_prefix_pop();
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) {
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_NMI_WINDOW_EXITING)) {
 		report_info("NMI-window exiting is not supported, skipping...");
 		goto done;
 	}
 
 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
-	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW);
+	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 |
+		   CPU_BASED_NMI_WINDOW_EXITING);
 	report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting enabled");
 	test_vmx_invalid_controls();
 	report_prefix_pop();
@@ -4767,7 +4779,8 @@ static void test_nmi_ctrls(void)
 	report_prefix_pop();
 
 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI));
-	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW);
+	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 |
+		   CPU_BASED_NMI_WINDOW_EXITING);
 	report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting enabled");
 	test_vmx_valid_controls();
 	report_prefix_pop();
@@ -5121,14 +5134,14 @@ static void enable_mtf(void)
 {
 	u32 ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
 
-	vmcs_write(CPU_EXEC_CTRL0, ctrl0 | CPU_MTF);
+	vmcs_write(CPU_EXEC_CTRL0, ctrl0 | CPU_BASED_MONITOR_TRAP_FLAG);
 }
 
 static void disable_mtf(void)
 {
 	u32 ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
 
-	vmcs_write(CPU_EXEC_CTRL0, ctrl0 & ~CPU_MTF);
+	vmcs_write(CPU_EXEC_CTRL0, ctrl0 & ~CPU_BASED_MONITOR_TRAP_FLAG);
 }
 
 static void enable_tf(void)
@@ -5159,7 +5172,7 @@ static void vmx_mtf_test(void)
 	unsigned long pending_dbg;
 	handler old_gp, old_db;
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_MTF)) {
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_MONITOR_TRAP_FLAG)) {
 		report_skip("%s : \"Monitor trap flag\" exec control not supported", __func__);
 		return;
 	}
@@ -5262,7 +5275,7 @@ static void vmx_mtf_pdpte_test(void)
 	if (setup_ept(false))
 		return;
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_MTF)) {
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_MONITOR_TRAP_FLAG)) {
 		report_skip("%s : \"Monitor trap flag\" exec control not supported", __func__);
 		return;
 	}
@@ -6185,13 +6198,13 @@ static enum Config_type configure_apic_reg_virt_test(
 	}
 
 	if (apic_reg_virt_config->use_tpr_shadow) {
-		if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) {
+		if (!(ctrl_cpu_rev[0].clr & CPU_BASED_TPR_SHADOW)) {
 			printf("VM-execution control \"use TPR shadow\" NOT supported.\n");
 			return CONFIG_TYPE_UNSUPPORTED;
 		}
-		cpu_exec_ctrl0 |= CPU_TPR_SHADOW;
+		cpu_exec_ctrl0 |= CPU_BASED_TPR_SHADOW;
 	} else {
-		cpu_exec_ctrl0 &= ~CPU_TPR_SHADOW;
+		cpu_exec_ctrl0 &= ~CPU_BASED_TPR_SHADOW;
 	}
 
 	if (apic_reg_virt_config->apic_register_virtualization) {
@@ -6968,9 +6981,9 @@ static enum Config_type configure_virt_x2apic_mode_test(
 	/* x2apic-specific VMCS config */
 	if (virt_x2apic_mode_config->use_msr_bitmaps) {
 		/* virt_x2apic_mode_test() checks for MSR bitmaps support */
-		cpu_exec_ctrl0 |= CPU_MSR_BITMAP;
+		cpu_exec_ctrl0 |= CPU_BASED_USE_MSR_BITMAPS;
 	} else {
-		cpu_exec_ctrl0 &= ~CPU_MSR_BITMAP;
+		cpu_exec_ctrl0 &= ~CPU_BASED_USE_MSR_BITMAPS;
 	}
 
 	if (virt_x2apic_mode_config->virtual_interrupt_delivery) {
@@ -7035,10 +7048,10 @@ static void virt_x2apic_mode_test(void)
 	 *   - "Virtual-APIC address", indicated by "use TPR shadow"
 	 *   - "MSR-bitmap address", indicated by "use MSR bitmaps"
 	 */
-	if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) {
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_TPR_SHADOW)) {
 		report_skip("%s : \"Use TPR shadow\" exec control not supported", __func__);
 		return;
-	} else if (!(ctrl_cpu_rev[0].clr & CPU_MSR_BITMAP)) {
+	} else if (!(ctrl_cpu_rev[0].clr & CPU_BASED_USE_MSR_BITMAPS)) {
 		report_skip("%s : \"Use MSR bitmaps\" exec control not supported", __func__);
 		return;
 	}
@@ -8673,7 +8686,7 @@ static void vmx_nmi_window_test(void)
 		return;
 	}
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) {
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_NMI_WINDOW_EXITING)) {
 		report_skip("%s : \"NMI-window exiting\" exec control not supported", __func__);
 		return;
 	}
@@ -8692,7 +8705,7 @@ static void vmx_nmi_window_test(void)
 	 * RIP will not advance.
 	 */
 	report_prefix_push("active, no blocking");
-	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW);
+	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_BASED_NMI_WINDOW_EXITING);
 	enter_guest();
 	verify_nmi_window_exit(nop_addr);
 	report_prefix_pop();
@@ -8764,7 +8777,7 @@ static void vmx_nmi_window_test(void)
 		report_prefix_pop();
 	}
 
-	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW);
+	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_BASED_NMI_WINDOW_EXITING);
 	enter_guest();
 	report_prefix_pop();
 }
@@ -8804,7 +8817,7 @@ static void vmx_intr_window_test(void)
 	unsigned int orig_db_gate_type;
 	void *db_fault_addr = get_idt_addr(&boot_idt[DB_VECTOR]);
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_INTR_WINDOW)) {
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_INTR_WINDOW_EXITING)) {
 		report_skip("%s : \"Interrupt-window exiting\" exec control not supported", __func__);
 		return;
 	}
@@ -8830,7 +8843,7 @@ static void vmx_intr_window_test(void)
 	 * point to the vmcall instruction.
 	 */
 	report_prefix_push("active, no blocking, RFLAGS.IF=1");
-	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
+	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_BASED_INTR_WINDOW_EXITING);
 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_IF);
 	enter_guest();
 	verify_intr_window_exit(vmcall_addr);
@@ -8857,11 +8870,11 @@ static void vmx_intr_window_test(void)
 	 * VM-exits. Then, advance past the VMCALL and set the
 	 * "interrupt-window exiting" VM-execution control again.
 	 */
-	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
+	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_BASED_INTR_WINDOW_EXITING);
 	enter_guest();
 	skip_exit_vmcall();
 	nop_addr = vmcs_read(GUEST_RIP);
-	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
+	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_BASED_INTR_WINDOW_EXITING);
 
 	/*
 	 * Ask for "interrupt-window exiting" in a MOV-SS shadow with
@@ -8932,7 +8945,7 @@ static void vmx_intr_window_test(void)
 	}
 
 	boot_idt[DB_VECTOR].type = orig_db_gate_type;
-	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
+	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_BASED_INTR_WINDOW_EXITING);
 	enter_guest();
 	report_prefix_pop();
 }
@@ -8956,14 +8969,14 @@ static void vmx_store_tsc_test(void)
 	struct vmx_msr_entry msr_entry = { .index = MSR_IA32_TSC };
 	u64 low, high;
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET)) {
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_USE_TSC_OFFSETTING)) {
 		report_skip("%s : \"Use TSC offsetting\" exec control not supported", __func__);
 		return;
 	}
 
 	test_set_guest(vmx_store_tsc_test_guest);
 
-	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_USE_TSC_OFFSET);
+	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_BASED_USE_TSC_OFFSETTING);
 	vmcs_write(EXI_MSR_ST_CNT, 1);
 	vmcs_write(EXIT_MSR_ST_ADDR, virt_to_phys(&msr_entry));
 	vmcs_write(TSC_OFFSET, GUEST_TSC_OFFSET);
@@ -9506,7 +9519,7 @@ static void enable_vid(void)
 	vmcs_write(EOI_EXIT_BITMAP2, 0x0);
 	vmcs_write(EOI_EXIT_BITMAP3, 0x0);
 
-	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY | CPU_TPR_SHADOW);
+	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY | CPU_BASED_TPR_SHADOW);
 	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_VINTD | CPU_VIRT_X2APIC);
 }
 
@@ -10388,7 +10401,7 @@ static void vmx_vmcs_shadow_test(void)
 	shadow->hdr.shadow_vmcs = 1;
 	TEST_ASSERT(!vmcs_clear(shadow));
 
-	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_RDTSC);
+	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_BASED_RDTSC_EXITING);
 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY);
 	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_SHADOW_VMCS);
 
@@ -10423,7 +10436,7 @@ static void vmx_vmcs_shadow_test(void)
  */
 static void reset_guest_tsc_to_zero(void)
 {
-	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_USE_TSC_OFFSET);
+	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_BASED_USE_TSC_OFFSETTING);
 	vmcs_write(TSC_OFFSET, -rdtsc());
 }
 
@@ -10446,7 +10459,7 @@ static unsigned long long host_time_to_guest_time(unsigned long long t)
 	TEST_ASSERT(!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
 		    !(vmcs_read(CPU_EXEC_CTRL1) & CPU_USE_TSC_SCALING));
 
-	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_USE_TSC_OFFSET)
+	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_BASED_USE_TSC_OFFSETTING)
 		t += vmcs_read(TSC_OFFSET);
 
 	return t;
@@ -10470,7 +10483,7 @@ static void rdtsc_vmexit_diff_test(void)
 	int fail = 0;
 	int i;
 
-	if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET))
+	if (!(ctrl_cpu_rev[0].clr & CPU_BASED_USE_TSC_OFFSETTING))
 		test_skip("CPU doesn't support the 'use TSC offsetting' processor-based VM-execution control.\n");
 
 	test_set_guest(rdtsc_vmexit_diff_test_guest);
@@ -10691,9 +10704,9 @@ static void __vmx_pf_exception_test(invalidate_tlb_t inv_fn, void *data,
 
 	/* Intercept INVLPG when to perform TLB invalidation from L1 (this). */
 	if (inv_fn)
-		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INVLPG);
+		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_BASED_INVLPG_EXITING);
 	else
-		vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INVLPG);
+		vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_BASED_INVLPG_EXITING);
 
 	enter_guest();
 
-- 
2.43.0


  parent reply	other threads:[~2025-09-16 16:45 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-16 17:22 [kvm-unit-tests PATCH 00/17] x86/vmx: align with Linux kernel VMX definitions Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 01/17] lib: add linux vmx.h clone from 6.16 Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 02/17] lib: add linux trapnr.h " Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 03/17] lib: add vmxfeatures.h " Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 04/17] lib: define __aligned() in compiler.h Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 05/17] x86/vmx: basic integration for new vmx.h Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 06/17] x86/vmx: switch to new vmx.h EPT violation defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 07/17] x86/vmx: switch to new vmx.h EPT RWX defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 08/17] x86/vmx: switch to new vmx.h EPT access and dirty defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 09/17] x86/vmx: switch to new vmx.h EPT capability and memory type defs Jon Kohler
2025-09-16 17:22 ` Jon Kohler [this message]
2025-09-16 17:22 ` [kvm-unit-tests PATCH 11/17] x86/vmx: switch to new vmx.h secondary execution control bit Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 12/17] x86/vmx: switch to new vmx.h secondary execution controls Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 13/17] x86/vmx: switch to new vmx.h pin based VM-execution controls Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 14/17] x86/vmx: switch to new vmx.h exit controls Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 15/17] x86/vmx: switch to new vmx.h entry controls Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 16/17] x86/vmx: switch to new vmx.h interrupt defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 17/17] x86/vmx: align exit reasons with Linux uapi Jon Kohler
2025-11-12 19:02 ` [kvm-unit-tests PATCH 00/17] x86/vmx: align with Linux kernel VMX definitions Sean Christopherson
2025-11-14 14:52   ` Jon Kohler
2025-11-17 17:41     ` Sean Christopherson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250916172247.610021-11-jon@nutanix.com \
    --to=jon@nutanix.com \
    --cc=kvm@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=seanjc@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox