From: Jon Kohler <jon@nutanix.com>
To: seanjc@google.com, pbonzini@redhat.com, kvm@vger.kernel.org
Cc: Jon Kohler <jon@nutanix.com>
Subject: [kvm-unit-tests PATCH 15/17] x86/vmx: switch to new vmx.h entry controls
Date: Tue, 16 Sep 2025 10:22:44 -0700 [thread overview]
Message-ID: <20250916172247.610021-16-jon@nutanix.com> (raw)
In-Reply-To: <20250916172247.610021-1-jon@nutanix.com>
Migrate to the new vmx.h entry controls, which makes it easier to
follow the code when moving between the two code bases.
No functional change intended.
Signed-off-by: Jon Kohler <jon@nutanix.com>
---
x86/vmx.c | 6 +--
x86/vmx.h | 9 -----
x86/vmx_tests.c | 97 ++++++++++++++++++++++++++-----------------------
3 files changed, 55 insertions(+), 57 deletions(-)
diff --git a/x86/vmx.c b/x86/vmx.c
index bd16e833..7be93a72 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -1167,11 +1167,11 @@ static void init_vmcs_guest(void)
guest_cr0 = read_cr0();
guest_cr4 = read_cr4();
guest_cr3 = read_cr3();
- if (ctrl_enter & ENT_GUEST_64) {
+ if (ctrl_enter & VM_ENTRY_IA32E_MODE) {
guest_cr0 |= X86_CR0_PG;
guest_cr4 |= X86_CR4_PAE;
}
- if ((ctrl_enter & ENT_GUEST_64) == 0)
+ if ((ctrl_enter & VM_ENTRY_IA32E_MODE) == 0)
guest_cr4 &= (~X86_CR4_PCIDE);
if (guest_cr0 & X86_CR0_PG)
guest_cr0 |= X86_CR0_PE;
@@ -1260,7 +1260,7 @@ int init_vmcs(struct vmcs **vmcs)
ctrl_exit = VM_EXIT_LOAD_IA32_EFER |
VM_EXIT_HOST_ADDR_SPACE_SIZE |
VM_EXIT_LOAD_IA32_PAT;
- ctrl_enter = (ENT_LOAD_EFER | ENT_GUEST_64);
+ ctrl_enter = (VM_ENTRY_LOAD_IA32_EFER | VM_ENTRY_IA32E_MODE);
/* DIsable IO instruction VMEXIT now */
ctrl_cpu[0] &= (~(CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_USE_IO_BITMAPS));
ctrl_cpu[1] = 0;
diff --git a/x86/vmx.h b/x86/vmx.h
index 30503ff4..8bb49d8e 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -406,15 +406,6 @@ enum Reason {
VMX_XRSTORS = 64,
};
-enum Ctrl_ent {
- ENT_LOAD_DBGCTLS = 1UL << 2,
- ENT_GUEST_64 = 1UL << 9,
- ENT_LOAD_PERF = 1UL << 13,
- ENT_LOAD_PAT = 1UL << 14,
- ENT_LOAD_EFER = 1UL << 15,
- ENT_LOAD_BNDCFGS = 1UL << 16
-};
-
enum Intr_type {
VMX_INTR_TYPE_EXT_INTR = 0,
VMX_INTR_TYPE_NMI_INTR = 2,
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 77a63a3e..2f9858a3 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -316,14 +316,14 @@ static int test_ctrl_pat_init(struct vmcs *vmcs)
msr_bmp_init();
if (!(ctrl_exit_rev.clr & VM_EXIT_SAVE_IA32_PAT) &&
!(ctrl_exit_rev.clr & VM_EXIT_LOAD_IA32_PAT) &&
- !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
+ !(ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_PAT)) {
printf("\tSave/load PAT is not supported\n");
return 1;
}
ctrl_ent = vmcs_read(ENT_CONTROLS);
ctrl_exi = vmcs_read(EXI_CONTROLS);
- ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
+ ctrl_ent |= ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_PAT;
ctrl_exi |= ctrl_exit_rev.clr & (VM_EXIT_SAVE_IA32_PAT |
VM_EXIT_LOAD_IA32_PAT);
vmcs_write(ENT_CONTROLS, ctrl_ent);
@@ -339,7 +339,7 @@ static void test_ctrl_pat_main(void)
u64 guest_ia32_pat;
guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
- if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
+ if (!(ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_PAT))
printf("\tENT_LOAD_PAT is not supported.\n");
else {
if (guest_ia32_pat != 0) {
@@ -350,7 +350,7 @@ static void test_ctrl_pat_main(void)
wrmsr(MSR_IA32_CR_PAT, 0x6);
vmcall();
guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
- if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
+ if (ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_PAT)
report(guest_ia32_pat == ia32_pat, "Entry load PAT");
}
@@ -390,7 +390,7 @@ static int test_ctrl_efer_init(struct vmcs *vmcs)
u64 ctrl_exi;
msr_bmp_init();
- ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
+ ctrl_ent = vmcs_read(ENT_CONTROLS) | VM_ENTRY_LOAD_IA32_EFER;
ctrl_exi = vmcs_read(EXI_CONTROLS) |
VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_LOAD_IA32_EFER;
@@ -407,7 +407,7 @@ static void test_ctrl_efer_main(void)
u64 guest_ia32_efer;
guest_ia32_efer = rdmsr(MSR_EFER);
- if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
+ if (!(ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_EFER))
printf("\tENT_LOAD_EFER is not supported.\n");
else {
if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
@@ -418,7 +418,7 @@ static void test_ctrl_efer_main(void)
wrmsr(MSR_EFER, ia32_efer);
vmcall();
guest_ia32_efer = rdmsr(MSR_EFER);
- if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
+ if (ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_EFER)
report(guest_ia32_efer == ia32_efer, "Entry load EFER");
}
@@ -1922,7 +1922,8 @@ static int dbgctls_init(struct vmcs *vmcs)
vmcs_write(GUEST_DR7, 0x404);
vmcs_write(GUEST_DEBUGCTL, 0x2);
- vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
+ vmcs_write(ENT_CONTROLS,
+ vmcs_read(ENT_CONTROLS) | VM_ENTRY_LOAD_DEBUG_CONTROLS);
vmcs_write(EXI_CONTROLS,
vmcs_read(EXI_CONTROLS) | VM_EXIT_SAVE_DEBUG_CONTROLS);
@@ -1947,7 +1948,7 @@ static void dbgctls_main(void)
vmcall();
report(vmx_get_test_stage() == 1, "Save debug controls");
- if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS ||
+ if (ctrl_enter_rev.set & VM_ENTRY_LOAD_DEBUG_CONTROLS ||
ctrl_exit_rev.set & VM_EXIT_SAVE_DEBUG_CONTROLS) {
printf("\tDebug controls are always loaded/saved\n");
return;
@@ -1998,7 +1999,8 @@ static int dbgctls_exit_handler(union exit_reason exit_reason)
vmcs_write(GUEST_DEBUGCTL, 0x2);
vmcs_write(ENT_CONTROLS,
- vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS);
+ vmcs_read(ENT_CONTROLS) &
+ ~VM_ENTRY_LOAD_DEBUG_CONTROLS);
vmcs_write(EXI_CONTROLS,
vmcs_read(EXI_CONTROLS) &
~VM_EXIT_SAVE_DEBUG_CONTROLS);
@@ -5382,7 +5384,7 @@ static void vmx_mtf_pdpte_test(void)
* when the guest started out in long mode.
*/
ent_ctls = vmcs_read(ENT_CONTROLS);
- vmcs_write(ENT_CONTROLS, ent_ctls & ~ENT_GUEST_64);
+ vmcs_write(ENT_CONTROLS, ent_ctls & ~VM_ENTRY_IA32E_MODE);
guest_efer = vmcs_read(GUEST_EFER);
vmcs_write(GUEST_EFER, guest_efer & ~(EFER_LMA | EFER_LME));
@@ -7299,11 +7301,11 @@ static void test_efer_one(u32 fld, const char * fld_name, u64 efer,
if (!!(efer & EFER_LME) != !!(ctrl & VM_EXIT_HOST_ADDR_SPACE_SIZE))
ok = false;
}
- if (ctrl_fld == ENT_CONTROLS && (ctrl & ENT_LOAD_EFER)) {
+ if (ctrl_fld == ENT_CONTROLS && (ctrl & VM_ENTRY_LOAD_IA32_EFER)) {
/* Check LMA too since CR0.PG is set. */
- if (!!(efer & EFER_LMA) != !!(ctrl & ENT_GUEST_64))
+ if (!!(efer & EFER_LMA) != !!(ctrl & VM_ENTRY_IA32E_MODE))
ok = false;
- if (!!(efer & EFER_LME) != !!(ctrl & ENT_GUEST_64))
+ if (!!(efer & EFER_LME) != !!(ctrl & VM_ENTRY_IA32E_MODE))
ok = false;
}
@@ -7312,7 +7314,7 @@ static void test_efer_one(u32 fld, const char * fld_name, u64 efer,
* Perhaps write the test in assembly and make sure it
* can be run in either mode?
*/
- if (fld == GUEST_EFER && ok && !(ctrl & ENT_GUEST_64))
+ if (fld == GUEST_EFER && ok && !(ctrl & VM_ENTRY_IA32E_MODE))
return;
vmcs_write(ctrl_fld, ctrl);
@@ -7446,15 +7448,15 @@ static void test_host_efer(void)
*/
static void test_guest_efer(void)
{
- if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER)) {
+ if (!(ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_EFER)) {
report_skip("%s : \"Load-IA32-EFER\" entry control not supported", __func__);
return;
}
vmcs_write(GUEST_EFER, rdmsr(MSR_EFER));
test_efer(GUEST_EFER, "GUEST_EFER", ENT_CONTROLS,
- ctrl_enter_rev.clr & ENT_LOAD_EFER,
- ENT_GUEST_64);
+ ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_EFER,
+ VM_ENTRY_IA32E_MODE);
}
/*
@@ -7487,8 +7489,8 @@ static void test_pat(u32 field, const char * field_name, u32 ctrl_field,
report_prefix_pop();
} else { // GUEST_PAT
- test_guest_state("ENT_LOAD_PAT disabled", false,
- val, "GUEST_PAT");
+ test_guest_state("VM_ENTRY_LOAD_IA32_PAT disabled",
+ false, val, "GUEST_PAT");
}
}
}
@@ -7520,7 +7522,7 @@ static void test_pat(u32 field, const char * field_name, u32 ctrl_field,
} else { // GUEST_PAT
error = (i == 0x2 || i == 0x3 || i >= 0x8);
- test_guest_state("ENT_LOAD_PAT enabled", !!error,
+ test_guest_state("VM_ENTRY_LOAD_IA32_PAT enabled", !!error,
val, "GUEST_PAT");
if (!(ctrl_exit_rev.clr & VM_EXIT_LOAD_IA32_PAT))
@@ -7725,13 +7727,14 @@ static void test_load_guest_perf_global_ctrl(void)
return;
}
- if (!(ctrl_enter_rev.clr & ENT_LOAD_PERF)) {
+ if (!(ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)) {
report_skip("%s : \"Load IA32_PERF_GLOBAL_CTRL\" entry control not supported", __func__);
return;
}
test_perf_global_ctrl(GUEST_PERF_GLOBAL_CTRL, "GUEST_PERF_GLOBAL_CTRL",
- ENT_CONTROLS, "ENT_CONTROLS", ENT_LOAD_PERF);
+ ENT_CONTROLS, "ENT_CONTROLS",
+ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
}
@@ -7912,7 +7915,7 @@ static void test_host_addr_size(void)
assert(vmcs_read(EXI_CONTROLS) & VM_EXIT_HOST_ADDR_SPACE_SIZE);
assert(cr4_saved & X86_CR4_PAE);
- vmcs_write(ENT_CONTROLS, entry_ctrl_saved | ENT_GUEST_64);
+ vmcs_write(ENT_CONTROLS, entry_ctrl_saved | VM_ENTRY_IA32E_MODE);
report_prefix_pushf("\"IA-32e mode guest\" enabled");
test_vmx_vmlaunch(0);
report_prefix_pop();
@@ -7935,7 +7938,7 @@ static void test_host_addr_size(void)
test_vmx_vmlaunch_must_fail(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
report_prefix_pop();
- vmcs_write(ENT_CONTROLS, entry_ctrl_saved | ENT_GUEST_64);
+ vmcs_write(ENT_CONTROLS, entry_ctrl_saved | VM_ENTRY_IA32E_MODE);
vmcs_write(HOST_RIP, rip_saved);
vmcs_write(HOST_CR4, cr4_saved);
@@ -7994,22 +7997,22 @@ static void test_guest_dr7(void)
u64 val;
int i;
- if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS) {
- vmcs_clear_bits(ENT_CONTROLS, ENT_LOAD_DBGCTLS);
+ if (ctrl_enter_rev.set & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+ vmcs_clear_bits(ENT_CONTROLS, VM_ENTRY_LOAD_DEBUG_CONTROLS);
for (i = 0; i < 64; i++) {
val = 1ull << i;
vmcs_write(GUEST_DR7, val);
- test_guest_state("ENT_LOAD_DBGCTLS disabled", false,
- val, "GUEST_DR7");
+ test_guest_state("VM_ENTRY_LOAD_DEBUG_CONTROLS disabled",
+ false, val, "GUEST_DR7");
}
}
- if (ctrl_enter_rev.clr & ENT_LOAD_DBGCTLS) {
- vmcs_set_bits(ENT_CONTROLS, ENT_LOAD_DBGCTLS);
+ if (ctrl_enter_rev.clr & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+ vmcs_set_bits(ENT_CONTROLS, VM_ENTRY_LOAD_DEBUG_CONTROLS);
for (i = 0; i < 64; i++) {
val = 1ull << i;
vmcs_write(GUEST_DR7, val);
- test_guest_state("ENT_LOAD_DBGCTLS enabled", i >= 32,
- val, "GUEST_DR7");
+ test_guest_state("VM_ENTRY_LOAD_DEBUG_CONTROLS enabled",
+ i >= 32, val, "GUEST_DR7");
}
}
vmcs_write(GUEST_DR7, dr7_saved);
@@ -8030,12 +8033,13 @@ static void test_load_guest_pat(void)
/*
* "load IA32_PAT" VM-entry control
*/
- if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
+ if (!(ctrl_enter_rev.clr & VM_ENTRY_LOAD_IA32_PAT)) {
report_skip("%s : \"Load-IA32-PAT\" entry control not supported", __func__);
return;
}
- test_pat(GUEST_PAT, "GUEST_PAT", ENT_CONTROLS, ENT_LOAD_PAT);
+ test_pat(GUEST_PAT, "GUEST_PAT", ENT_CONTROLS,
+ VM_ENTRY_LOAD_IA32_PAT);
}
#define MSR_IA32_BNDCFGS_RSVD_MASK 0x00000ffc
@@ -8054,29 +8058,29 @@ static void test_load_guest_bndcfgs(void)
u64 bndcfgs_saved = vmcs_read(GUEST_BNDCFGS);
u64 bndcfgs;
- if (!(ctrl_enter_rev.clr & ENT_LOAD_BNDCFGS)) {
+ if (!(ctrl_enter_rev.clr & VM_ENTRY_LOAD_BNDCFGS)) {
report_skip("%s : \"Load-IA32-BNDCFGS\" entry control not supported", __func__);
return;
}
- vmcs_clear_bits(ENT_CONTROLS, ENT_LOAD_BNDCFGS);
+ vmcs_clear_bits(ENT_CONTROLS, VM_ENTRY_LOAD_BNDCFGS);
vmcs_write(GUEST_BNDCFGS, NONCANONICAL);
- test_guest_state("ENT_LOAD_BNDCFGS disabled", false,
+ test_guest_state("VM_ENTRY_LOAD_BNDCFGS disabled", false,
GUEST_BNDCFGS, "GUEST_BNDCFGS");
bndcfgs = bndcfgs_saved | MSR_IA32_BNDCFGS_RSVD_MASK;
vmcs_write(GUEST_BNDCFGS, bndcfgs);
- test_guest_state("ENT_LOAD_BNDCFGS disabled", false,
+ test_guest_state("VM_ENTRY_LOAD_BNDCFGS disabled", false,
GUEST_BNDCFGS, "GUEST_BNDCFGS");
- vmcs_set_bits(ENT_CONTROLS, ENT_LOAD_BNDCFGS);
+ vmcs_set_bits(ENT_CONTROLS, VM_ENTRY_LOAD_BNDCFGS);
vmcs_write(GUEST_BNDCFGS, NONCANONICAL);
- test_guest_state("ENT_LOAD_BNDCFGS enabled", true,
+ test_guest_state("VM_ENTRY_LOAD_BNDCFGS enabled", true,
GUEST_BNDCFGS, "GUEST_BNDCFGS");
bndcfgs = bndcfgs_saved | MSR_IA32_BNDCFGS_RSVD_MASK;
vmcs_write(GUEST_BNDCFGS, bndcfgs);
- test_guest_state("ENT_LOAD_BNDCFGS enabled", true,
+ test_guest_state("VM_ENTRY_LOAD_BNDCFGS enabled", true,
GUEST_BNDCFGS, "GUEST_BNDCFGS");
vmcs_write(GUEST_BNDCFGS, bndcfgs_saved);
@@ -8335,7 +8339,8 @@ asm (".code32\n"
static void setup_unrestricted_guest(void)
{
vmcs_write(GUEST_CR0, vmcs_read(GUEST_CR0) & ~(X86_CR0_PG));
- vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) & ~ENT_GUEST_64);
+ vmcs_write(ENT_CONTROLS,
+ vmcs_read(ENT_CONTROLS) & ~VM_ENTRY_IA32E_MODE);
vmcs_write(GUEST_EFER, vmcs_read(GUEST_EFER) & ~EFER_LMA);
vmcs_write(GUEST_RIP, virt_to_phys(unrestricted_guest_main));
}
@@ -8343,7 +8348,8 @@ static void setup_unrestricted_guest(void)
static void unsetup_unrestricted_guest(void)
{
vmcs_write(GUEST_CR0, vmcs_read(GUEST_CR0) | X86_CR0_PG);
- vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_GUEST_64);
+ vmcs_write(ENT_CONTROLS,
+ vmcs_read(ENT_CONTROLS) | VM_ENTRY_IA32E_MODE);
vmcs_write(GUEST_EFER, vmcs_read(GUEST_EFER) | EFER_LMA);
vmcs_write(GUEST_RIP, (u64) phys_to_virt(vmcs_read(GUEST_RIP)));
vmcs_write(GUEST_RSP, (u64) phys_to_virt(vmcs_read(GUEST_RSP)));
@@ -9563,7 +9569,8 @@ static void vmx_db_test(void)
*/
if (this_cpu_has(X86_FEATURE_RTM)) {
vmcs_write(ENT_CONTROLS,
- vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
+ vmcs_read(ENT_CONTROLS) |
+ VM_ENTRY_LOAD_DEBUG_CONTROLS);
/*
* Set DR7.RTM[bit 11] and IA32_DEBUGCTL.RTM[bit 15]
* in the guest to enable advanced debugging of RTM
--
2.43.0
next prev parent reply other threads:[~2025-09-16 16:44 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-16 17:22 [kvm-unit-tests PATCH 00/17] x86/vmx: align with Linux kernel VMX definitions Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 01/17] lib: add linux vmx.h clone from 6.16 Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 02/17] lib: add linux trapnr.h " Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 03/17] lib: add vmxfeatures.h " Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 04/17] lib: define __aligned() in compiler.h Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 05/17] x86/vmx: basic integration for new vmx.h Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 06/17] x86/vmx: switch to new vmx.h EPT violation defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 07/17] x86/vmx: switch to new vmx.h EPT RWX defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 08/17] x86/vmx: switch to new vmx.h EPT access and dirty defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 09/17] x86/vmx: switch to new vmx.h EPT capability and memory type defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 10/17] x86/vmx: switch to new vmx.h primary processor-based VM-execution controls Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 11/17] x86/vmx: switch to new vmx.h secondary execution control bit Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 12/17] x86/vmx: switch to new vmx.h secondary execution controls Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 13/17] x86/vmx: switch to new vmx.h pin based VM-execution controls Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 14/17] x86/vmx: switch to new vmx.h exit controls Jon Kohler
2025-09-16 17:22 ` Jon Kohler [this message]
2025-09-16 17:22 ` [kvm-unit-tests PATCH 16/17] x86/vmx: switch to new vmx.h interrupt defs Jon Kohler
2025-09-16 17:22 ` [kvm-unit-tests PATCH 17/17] x86/vmx: align exit reasons with Linux uapi Jon Kohler
2025-11-12 19:02 ` [kvm-unit-tests PATCH 00/17] x86/vmx: align with Linux kernel VMX definitions Sean Christopherson
2025-11-14 14:52 ` Jon Kohler
2025-11-17 17:41 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250916172247.610021-16-jon@nutanix.com \
--to=jon@nutanix.com \
--cc=kvm@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox