From: "Chang S. Bae" <chang.seok.bae@intel.com>
To: pbonzini@redhat.com, seanjc@google.com
Cc: kvm@vger.kernel.org, x86@kernel.org,
linux-kernel@vger.kernel.org, chao.gao@intel.com,
chang.seok.bae@intel.com
Subject: [PATCH v4 20/21] KVM: x86: selftests: Add APX state and ABI test
Date: Tue, 12 May 2026 01:15:01 +0000 [thread overview]
Message-ID: <20260512011502.53072-21-chang.seok.bae@intel.com> (raw)
In-Reply-To: <20260512011502.53072-1-chang.seok.bae@intel.com>
Test APX-specific behavior and ABI interactions, as these are unique
compared to other state components:
* EGPR state is saved in the VM-entry assembly (unlike other components).
* The saved state is retained even if the guest disables APX.
* EGPR state is exposed via the XSAVE ABI while not being stored in the
kernel XSAVE buffer.
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
---
tools/testing/selftests/kvm/Makefile.kvm | 1 +
.../selftests/kvm/include/x86/processor.h | 120 +++++++++++
tools/testing/selftests/kvm/x86/apx_test.c | 191 ++++++++++++++++++
3 files changed, 312 insertions(+)
create mode 100644 tools/testing/selftests/kvm/x86/apx_test.c
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index 9118a5a51b89..7fcb1b88bdd6 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -158,6 +158,7 @@ TEST_GEN_PROGS_x86 += rseq_test
TEST_GEN_PROGS_x86 += steal_time
TEST_GEN_PROGS_x86 += system_counter_offset_test
TEST_GEN_PROGS_x86 += pre_fault_memory_test
+TEST_GEN_PROGS_x86 += x86/apx_test
# Compiled outputs used by test targets
TEST_GEN_PROGS_EXTENDED_x86 += x86/nx_huge_pages_test
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index 77f576ee7789..4c3cd65fce81 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -90,6 +90,7 @@ struct xstate {
#define XFEATURE_MASK_LBR BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA BIT_ULL(18)
+#define XFEATURE_MASK_APX BIT_ULL(19)
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
@@ -177,6 +178,7 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_SPEC_CTRL KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_PKS KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
+#define X86_FEATURE_APX KVM_X86_CPU_FEATURE(0x7, 1, EDX, 21)
#define X86_FEATURE_XTILECFG KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
@@ -859,6 +861,124 @@ static inline void write_sse_reg(int reg, const sse128_t *data)
}
}
+/*
+ * Read the 64-bit value of extended GPR r16..r31.
+ *
+ * Toolchains without APX support cannot assemble EGPR operands, so each
+ * "mov %rN, %rax" is hand-encoded: 0xd5 is the REX2 prefix; the second
+ * byte (0x48 for r16..r23, 0x4c for r24..r31) sets W=1 plus the extended
+ * register-index bits for the ModRM reg field; the ModRM byte selects the
+ * source EGPR with rax (the "=a" output) as the destination.
+ *
+ * NOTE(review): the asm has no inputs and is not volatile, so the
+ * compiler may legally CSE or hoist repeated reads of the same register —
+ * confirm this cannot merge reads across GUEST_SYNC/xsetbv in callers.
+ */
+static inline unsigned long read_egpr(int reg)
+{
+ unsigned long data = 0;
+
+ /* mov %r16..%r31, %rax */
+ switch (reg) {
+ case 16:
+ asm(".byte 0xd5, 0x48, 0x89, 0xc0" : "=a"(data));
+ break;
+ case 17:
+ asm(".byte 0xd5, 0x48, 0x89, 0xc8" : "=a"(data));
+ break;
+ case 18:
+ asm(".byte 0xd5, 0x48, 0x89, 0xd0" : "=a"(data));
+ break;
+ case 19:
+ asm(".byte 0xd5, 0x48, 0x89, 0xd8" : "=a"(data));
+ break;
+ case 20:
+ asm(".byte 0xd5, 0x48, 0x89, 0xe0" : "=a"(data));
+ break;
+ case 21:
+ asm(".byte 0xd5, 0x48, 0x89, 0xe8" : "=a"(data));
+ break;
+ case 22:
+ asm(".byte 0xd5, 0x48, 0x89, 0xf0" : "=a"(data));
+ break;
+ case 23:
+ asm(".byte 0xd5, 0x48, 0x89, 0xf8" : "=a"(data));
+ break;
+ case 24:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xc0" : "=a"(data));
+ break;
+ case 25:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xc8" : "=a"(data));
+ break;
+ case 26:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xd0" : "=a"(data));
+ break;
+ case 27:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xd8" : "=a"(data));
+ break;
+ case 28:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xe0" : "=a"(data));
+ break;
+ case 29:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xe8" : "=a"(data));
+ break;
+ case 30:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xf0" : "=a"(data));
+ break;
+ case 31:
+ asm(".byte 0xd5, 0x4c, 0x89, 0xf8" : "=a"(data));
+ break;
+ default:
+ BUG();
+ }
+
+ return data;
+}
+
+/*
+ * Write a 64-bit value to extended GPR r16..r31.
+ *
+ * Mirror of read_egpr(): hand-encoded REX2 (0xd5) "mov %rax, %rN" since
+ * the toolchain may not assemble EGPR operands. Here the EGPR is the
+ * ModRM r/m (destination), so the second byte (0x18 for r16..r23, 0x19
+ * for r24..r31) sets W=1 plus the extended base-register bits, and the
+ * ModRM byte (0xc0..0xc7) picks the low three bits of the destination.
+ */
+static inline void write_egpr(int reg, unsigned long data)
+{
+ /* mov %rax, %r16..%r31 */
+ switch (reg) {
+ case 16:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc0" : : "a"(data));
+ break;
+ case 17:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc1" : : "a"(data));
+ break;
+ case 18:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc2" : : "a"(data));
+ break;
+ case 19:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc3" : : "a"(data));
+ break;
+ case 20:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc4" : : "a"(data));
+ break;
+ case 21:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc5" : : "a"(data));
+ break;
+ case 22:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc6" : : "a"(data));
+ break;
+ case 23:
+ asm(".byte 0xd5, 0x18, 0x89, 0xc7" : : "a"(data));
+ break;
+ case 24:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc0" : : "a"(data));
+ break;
+ case 25:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc1" : : "a"(data));
+ break;
+ case 26:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc2" : : "a"(data));
+ break;
+ case 27:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc3" : : "a"(data));
+ break;
+ case 28:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc4" : : "a"(data));
+ break;
+ case 29:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc5" : : "a"(data));
+ break;
+ case 30:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc6" : : "a"(data));
+ break;
+ case 31:
+ asm(".byte 0xd5, 0x19, 0x89, 0xc7" : : "a"(data));
+ break;
+ default:
+ BUG();
+ }
+}
+
static inline void cpu_relax(void)
{
asm volatile("rep; nop" ::: "memory");
diff --git a/tools/testing/selftests/kvm/x86/apx_test.c b/tools/testing/selftests/kvm/x86/apx_test.c
new file mode 100644
index 000000000000..796ed9eb6957
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/apx_test.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "processor.h"
+
+/* Test stages; also the GUEST_SYNC payload passed up to userspace. */
+enum stages {
+ GUEST_UPDATE,
+ USERSPACE_UPDATE,
+ GUEST_APXOFF,
+};
+
+/* Operation selector for handle_egprs(). */
+enum egpr_ops {
+ EGPRS_WRITE,
+ EGPRS_CHECK,
+};
+
+/* Iterate over the extended GPR indices, r16 through r31 inclusive. */
+#define for_each_egpr(reg) for (reg = 16; reg <= 31; reg++)
+
+/*
+ * Deterministic per-stage test values for EGPRs so that guest and
+ * userspace can validate state transitions.
+ */
+static inline unsigned long egpr_data(enum stages stage, int reg)
+{
+ /* Distinct per-stage bases so a stale value from another stage fails. */
+ switch (stage) {
+ case GUEST_UPDATE:
+ return 0xabcd + reg;
+ case USERSPACE_UPDATE:
+ return 0xbcde + reg;
+ case GUEST_APXOFF:
+ return 0xcdef + reg;
+ default:
+ /* Unreachable with valid enum values; benign zero otherwise. */
+ return 0;
+ }
+}
+
+/*
+ * Read/write or validate EGPR values either directly via registers
+ * (guest context) or via a provided buffer (userspace XSAVE).
+ */
+/*
+ * @ops:   EGPRS_WRITE to store stage values, EGPRS_CHECK to compare them.
+ * @egprs: when non-NULL, a 16-entry array (index reg - 16) such as the
+ *         APX region of an XSAVE buffer; when NULL, operate directly on
+ *         the live r16..r31 registers (guest context).
+ * @stage: selects the expected per-register values via egpr_data().
+ *
+ * Returns true on success / match, false on mismatch or unknown op.
+ */
+static bool handle_egprs(enum egpr_ops ops, unsigned long *egprs, enum stages stage)
+{
+ unsigned long data;
+ int reg;
+
+ for_each_egpr(reg) {
+ data = egpr_data(stage, reg);
+
+ if (ops == EGPRS_WRITE) {
+ if (egprs)
+ egprs[reg - 16] = data;
+ else
+ write_egpr(reg, data);
+ continue;
+ }
+
+ if (ops != EGPRS_CHECK)
+ return false;
+
+ if (egprs) {
+ if (egprs[reg - 16] != data)
+ return false;
+ continue;
+ }
+
+ if (read_egpr(reg) != data)
+ return false;
+ }
+
+ return true;
+}
+
+/* Load the stage's test pattern into the live r16..r31 registers. */
+static void write_egprs(enum stages stage)
+{
+ handle_egprs(EGPRS_WRITE, NULL, stage);
+}
+
+/* Check the live r16..r31 registers against the stage's test pattern. */
+static bool validate_egprs(enum stages stage)
+{
+ return handle_egprs(EGPRS_CHECK, NULL, stage);
+}
+
+/* Guest writes EGPRs, exits to userspace, then verifies they survived. */
+static void test_guest_update(void)
+{
+ write_egprs(GUEST_UPDATE);
+ GUEST_SYNC(GUEST_UPDATE);
+ GUEST_ASSERT(validate_egprs(GUEST_UPDATE));
+}
+
+/* Verify EGPR values injected by userspace through KVM_SET_XSAVE. */
+static void test_userspace_update(void)
+{
+ /* Userspace updates EGPR state via the KVM XSAVE ABI */
+ GUEST_SYNC(USERSPACE_UPDATE);
+ GUEST_ASSERT(validate_egprs(USERSPACE_UPDATE));
+}
+
+/*
+ * Verify EGPR state is retained while the guest has APX disabled in XCR0
+ * (cleared across the GUEST_SYNC exit, re-enabled before validation).
+ */
+static void test_guest_apxoff(void)
+{
+ write_egprs(GUEST_APXOFF);
+ /* Disable APX to verify state is preserved */
+ GUEST_ASSERT(!xsetbv_safe(0, this_cpu_supported_xcr0() & ~XFEATURE_MASK_APX));
+ GUEST_SYNC(GUEST_APXOFF);
+ GUEST_ASSERT(!xsetbv_safe(0, this_cpu_supported_xcr0()));
+ GUEST_ASSERT(validate_egprs(GUEST_APXOFF));
+}
+
+/* Guest entry point: enable XSAVE/XCR0, then run the three sub-tests. */
+static void guest_code(void)
+{
+ set_cr4(get_cr4() | X86_CR4_OSXSAVE);
+ GUEST_ASSERT(!xsetbv_safe(0, this_cpu_supported_xcr0()));
+
+ test_guest_update();
+ test_userspace_update();
+ test_guest_apxoff();
+
+ GUEST_DONE();
+}
+
+/* CPUID.0xD, subleaf 19 (== XFEATURE_MASK_APX bit), EBX: APX state offset. */
+#define X86_PROPERTY_XSTATE_APX_OFFSET KVM_X86_CPU_PROPERTY(0xd, 19, EBX, 0, 31)
+/* XSAVE header (XSTATE_BV first) follows the 512-byte legacy region. */
+#define XSAVE_HDR_OFFSET 512
+
+/* Locate the 16-entry EGPR array inside a raw XSAVE image. */
+static inline unsigned long *xsave_egprs(void *xsave)
+{
+ return xsave + kvm_cpu_property(X86_PROPERTY_XSTATE_APX_OFFSET);
+}
+
+/* OR feature bits into XSTATE_BV so the component is marked present. */
+static inline void xstatebv_set(void *xsave, uint64_t mask)
+{
+ *(uint64_t *)(xsave + XSAVE_HDR_OFFSET) |= mask;
+}
+
+/* Store the stage pattern in the buffer and flag APX as present. */
+static void write_xsave_egprs(void *xsave, enum stages stage)
+{
+ handle_egprs(EGPRS_WRITE, xsave_egprs(xsave), stage);
+ xstatebv_set(xsave, XFEATURE_MASK_APX);
+}
+
+/* Compare the buffer's EGPR region against the stage pattern. */
+static bool validate_xsave_egprs(void *xsave, enum stages stage)
+{
+ return handle_egprs(EGPRS_CHECK, xsave_egprs(xsave), stage);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_xsave *xsave;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ enum stages stage;
+ struct ucall uc;
+ int xsave_size;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_APX));
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ /* KVM_CAP_XSAVE2 returns the compacted XSAVE buffer size (0 if absent). */
+ xsave_size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
+ TEST_ASSERT(xsave_size, "KVM_CAP_XSAVE2 not supported");
+ xsave = malloc(xsave_size);
+ TEST_ASSERT(xsave, "Failed to allocate XSAVE buffer");
+
+ while (1) {
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_SYNC: {
+ /* Stage identifier is the GUEST_SYNC argument. */
+ stage = uc.args[1];
+ vcpu_xsave_get(vcpu, xsave);
+ if (stage == USERSPACE_UPDATE) {
+ /* Inject new EGPR values for the guest to validate. */
+ write_xsave_egprs(xsave, stage);
+ } else {
+ TEST_ASSERT(validate_xsave_egprs(xsave, stage),
+ "EGPR state mismatch in userspace XSAVE buffer");
+ }
+ /* Round-trip the (possibly updated) state back into the vCPU. */
+ vcpu_xsave_set(vcpu, xsave);
+ break;
+ }
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ free(xsave);
+ kvm_vm_free(vm);
+ return 0;
+}
--
2.51.0
next prev parent reply other threads:[~2026-05-12 1:40 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-12 1:14 [PATCH v4 00/21] KVM: x86: Enable APX for guests Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 01/21] KVM: VMX: Macrofy GPR swapping in __vmx_vcpu_run() Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 02/21] KVM: SVM: Macrofy GPR swapping in __svm_vcpu_run() Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 03/21] KVM: SEV: Macrofy GPR swapping in __svm_sev_es_vcpu_run() Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 04/21] KVM: x86: Extend VCPU registers for EGPRs Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 05/21] KVM: VMX: Save guest EGPRs in VCPU cache Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 06/21] x86/fpu: Ignore APX when copying from/to guest FPU Chang S. Bae
2026-05-13 17:42 ` Paolo Bonzini
2026-05-13 19:10 ` Dave Hansen
2026-05-14 16:04 ` Paolo Bonzini
2026-05-14 17:17 ` Dave Hansen
2026-05-14 17:33 ` Paolo Bonzini
2026-05-15 2:04 ` Chang S. Bae
2026-05-15 8:18 ` Paolo Bonzini
2026-05-12 1:14 ` [PATCH v4 07/21] KVM: x86: Support APX state for XSAVE ABI Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 08/21] KVM: VMX: Refactor VMX instruction information access Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 09/21] KVM: VMX: Refactor instruction information decoding Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 10/21] KVM: VMX: Refactor register index retrieval from exit qualification Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 11/21] KVM: VMX: Support instruction information extension Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 12/21] KVM: nVMX: Propagate the extended instruction info field Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 13/21] KVM: x86: Support EGPR accessing and tracking for emulator Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 14/21] KVM: x86: Handle EGPR index and REX2-incompatible opcodes Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 15/21] KVM: x86: Support REX2-prefixed opcode decode Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 16/21] KVM: x86: Reject EVEX-prefixed instructions Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 17/21] KVM: x86: Guard valid XCR0.APX settings Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 18/21] KVM: x86: Expose APX foundation feature to guests Chang S. Bae
2026-05-12 1:15 ` [PATCH v4 19/21] KVM: x86: Expose APX sub-features " Chang S. Bae
2026-05-12 1:15 ` Chang S. Bae [this message]
2026-05-12 1:15 ` [PATCH v4 21/21] KVM: x86: selftests: Add APX state handling and XCR0 sanity checks Chang S. Bae
2026-05-13 17:52 ` [PATCH v4 00/21] KVM: x86: Enable APX for guests Paolo Bonzini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260512011502.53072-21-chang.seok.bae@intel.com \
--to=chang.seok.bae@intel.com \
--cc=chao.gao@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.