public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Nikunj A Dadhania <nikunj@amd.com>
To: <seanjc@google.com>, <pbonzini@redhat.com>
Cc: <kvm@vger.kernel.org>, <thomas.lendacky@amd.com>,
	<santosh.shukla@amd.com>, <bp@alien8.de>,
	<joao.m.martins@oracle.com>, <nikunj@amd.com>,
	<kai.huang@intel.com>
Subject: [PATCH v5 8/8] selftests: KVM: x86: Add SEV PML dirty logging test
Date: Mon, 5 Jan 2026 06:36:22 +0000	[thread overview]
Message-ID: <20260105063622.894410-9-nikunj@amd.com> (raw)
In-Reply-To: <20260105063622.894410-1-nikunj@amd.com>

Add a KVM selftest to verify Page Modification Logging (PML) functionality
with AMD SEV/SEV-ES/SEV-SNP guests. The test validates that
hardware-assisted dirty page tracking works correctly across different SEV
guest types.

Test methodology:
- Create SEV guest with additional memory slot for dirty logging
- Guest continuously writes to random pages within the test memory region
- Host periodically retrieves dirty log bitmap via KVM_GET_DIRTY_LOG
- Verify dirty pages match actual guest writes

Introduce vm_sev_create_with_one_vcpu_extramem() to allow specifying extra
memory pages during VM creation.

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
 tools/testing/selftests/kvm/Makefile.kvm      |   1 +
 tools/testing/selftests/kvm/include/x86/sev.h |   4 +
 tools/testing/selftests/kvm/lib/x86/sev.c     |  18 +-
 .../testing/selftests/kvm/x86/sev_pml_test.c  | 203 ++++++++++++++++++
 4 files changed, 223 insertions(+), 3 deletions(-)
 create mode 100644 tools/testing/selftests/kvm/x86/sev_pml_test.c

diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index ba5c2b643efa..746c79713c8d 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -134,6 +134,7 @@ TEST_GEN_PROGS_x86 += x86/xen_vmcall_test
 TEST_GEN_PROGS_x86 += x86/sev_init2_tests
 TEST_GEN_PROGS_x86 += x86/sev_migrate_tests
 TEST_GEN_PROGS_x86 += x86/sev_smoke_test
+TEST_GEN_PROGS_x86 += x86/sev_pml_test
 TEST_GEN_PROGS_x86 += x86/amx_test
 TEST_GEN_PROGS_x86 += x86/max_vcpuid_cap_test
 TEST_GEN_PROGS_x86 += x86/triple_fault_event_test
diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h
index 008b4169f5e2..b06583b91447 100644
--- a/tools/testing/selftests/kvm/include/x86/sev.h
+++ b/tools/testing/selftests/kvm/include/x86/sev.h
@@ -53,8 +53,12 @@ void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
 void snp_vm_launch_update(struct kvm_vm *vm);
 void snp_vm_launch_finish(struct kvm_vm *vm);
 
+struct kvm_vm *_vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+					   struct kvm_vcpu **cpu, uint64_t npages);
 struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 					   struct kvm_vcpu **cpu);
+struct kvm_vm *vm_sev_create_with_one_vcpu_extramem(uint32_t type, void *guest_code,
+					   struct kvm_vcpu **cpu, uint64_t npages);
 void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
 
 kvm_static_assert(SEV_RET_SUCCESS == 0);
diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c
index c3a9838f4806..20d67d01c997 100644
--- a/tools/testing/selftests/kvm/lib/x86/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86/sev.c
@@ -158,8 +158,8 @@ void snp_vm_launch_finish(struct kvm_vm *vm)
 	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
 }
 
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
-					   struct kvm_vcpu **cpu)
+struct kvm_vm *_vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+					   struct kvm_vcpu **cpu, uint64_t npages)
 {
 	struct vm_shape shape = {
 		.mode = VM_MODE_DEFAULT,
@@ -168,12 +168,24 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 	struct kvm_vm *vm;
 	struct kvm_vcpu *cpus[1];
 
-	vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
+	vm = __vm_create_with_vcpus(shape, 1, npages, guest_code, cpus);
 	*cpu = cpus[0];
 
 	return vm;
 }
 
+struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+					   struct kvm_vcpu **cpu)
+{
+	return _vm_sev_create_with_one_vcpu(type, guest_code, cpu, 0);
+}
+
+struct kvm_vm *vm_sev_create_with_one_vcpu_extramem(uint32_t type, void *guest_code,
+						    struct kvm_vcpu **cpu, uint64_t npages)
+{
+	return _vm_sev_create_with_one_vcpu(type, guest_code, cpu, npages);
+}
+
 void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
 {
 	if (is_sev_snp_vm(vm)) {
diff --git a/tools/testing/selftests/kvm/x86/sev_pml_test.c b/tools/testing/selftests/kvm/x86/sev_pml_test.c
new file mode 100644
index 000000000000..b1114f5a67f8
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/sev_pml_test.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitmap.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "sev.h"
+
+#define GUEST_NR_PAGES (1024)
+#define DEFAULT_GUEST_TEST_MEM 0xC0000000
+#define TEST_MEM_SLOT_INDEX 1
+
+/*
+ * Guest/Host shared variables.
+ */
+static uint64_t guest_page_size;
+static uint64_t guest_num_pages;
+
+/* Points to the test VM memory region on which we track dirty logs */
+static void *host_test_mem;
+
+/* Host variables */
+static pthread_t vcpu_thread;
+static bool vcpu_thread_done;
+
+/*
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
+ */
+static uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously write to the first 8 bytes of random pages within
+ * the testing memory region.
+ */
+static void guest_pml_code(void)
+{
+	uint64_t addr;
+	int write = 0;
+
+	while (write++ != (guest_num_pages * 10)) {
+		addr = guest_test_virt_mem;
+		addr += (guest_random_u64(&guest_rng) % guest_num_pages) * guest_page_size;
+
+		vcpu_arch_put_guest(*(uint64_t *)addr, 0xAA);
+	}
+}
+
+static void guest_pml_sev_code(void)
+{
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
+
+	guest_pml_code();
+
+	GUEST_DONE();
+}
+
+static void guest_pml_sev_es_code(void)
+{
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
+
+	guest_pml_code();
+
+	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
+	vmgexit();
+}
+
+static void guest_pml_sev_snp_code(void)
+{
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
+	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_SNP_ENABLED);
+
+	guest_pml_code();
+
+	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
+	vmgexit();
+}
+
+static unsigned long *bmap;
+static void *vcpu_worker(void *data)
+{
+	struct kvm_vcpu *vcpu = data;
+	struct kvm_vm *vm;
+	struct ucall uc;
+
+	vm = vcpu->vm;
+	while (1) {
+		/* Let the guest dirty the random pages */
+		vcpu_run(vcpu);
+
+		if (is_sev_es_vm(vm)) {
+			TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
+				    "Wanted SYSTEM_EVENT, got %s",
+				    exit_reason_str(vcpu->run->exit_reason));
+			TEST_ASSERT_EQ(vcpu->run->system_event.type, KVM_SYSTEM_EVENT_SEV_TERM);
+			TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);
+			TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);
+			break;
+		}
+
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_SYNC:
+			continue;
+		case UCALL_DONE:
+			goto exit_done;
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+		default:
+			TEST_FAIL("Unexpected exit: %s", exit_reason_str(vcpu->run->exit_reason));
+		}
+	}
+
+exit_done:
+	WRITE_ONCE(vcpu_thread_done, true);
+	return NULL;
+}
+
+static void vm_dirty_log_verify(void)
+{
+	uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0;
+
+	for (page = 0; page < guest_num_pages; page++) {
+		uint64_t val = *(uint64_t *)(host_test_mem + page * guest_page_size);
+		bool bmap_dirty = __test_and_clear_bit(page, bmap);
+
+		if (bmap_dirty && val == 0xAA)
+			nr_dirty_pages++;
+		else
+			nr_clean_pages++;
+	}
+	pr_debug("Dirty pages %ld clean pages %ld\n", nr_dirty_pages, nr_clean_pages);
+}
+
+void test_pml(void *guest_code, uint32_t type, uint64_t policy)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	vm = vm_sev_create_with_one_vcpu_extramem(type, guest_code, &vcpu, 2 * GUEST_NR_PAGES);
+
+	guest_page_size = vm->page_size;
+	guest_num_pages = GUEST_NR_PAGES;
+	guest_test_phys_mem = (vm->max_gfn - guest_num_pages) * guest_page_size;
+
+	bmap = bitmap_zalloc(guest_num_pages);
+
+	/* Add an extra memory slot for testing dirty logging */
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+				guest_test_phys_mem,
+				TEST_MEM_SLOT_INDEX,
+				guest_num_pages,
+				KVM_MEM_LOG_DIRTY_PAGES);
+
+	/* Do mapping for the dirty track memory slot */
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
+	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+
+	/* Export the shared variables to the guest */
+	sync_global_to_guest(vm, guest_page_size);
+	sync_global_to_guest(vm, guest_test_virt_mem);
+	sync_global_to_guest(vm, guest_num_pages);
+
+	WRITE_ONCE(vcpu_thread_done, false);
+	vm_sev_launch(vm, policy, NULL);
+
+	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
+	while (!READ_ONCE(vcpu_thread_done)) {
+		usleep(1000);
+		kvm_vm_get_dirty_log(vcpu->vm, TEST_MEM_SLOT_INDEX, bmap);
+	}
+	pthread_join(vcpu_thread, NULL);
+
+	vm_dirty_log_verify();
+	free(bmap);
+
+	kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(get_kvm_amd_param_bool("pml"));
+
+	if (kvm_cpu_has(X86_FEATURE_SEV))
+		test_pml(guest_pml_sev_code, KVM_X86_SEV_VM, SEV_POLICY_NO_DBG);
+
+	if (kvm_cpu_has(X86_FEATURE_SEV_ES))
+		test_pml(guest_pml_sev_es_code, KVM_X86_SEV_ES_VM,
+			 SEV_POLICY_ES | SEV_POLICY_NO_DBG);
+
+	if (kvm_cpu_has(X86_FEATURE_SEV_SNP))
+		test_pml(guest_pml_sev_snp_code, KVM_X86_SNP_VM,
+			 snp_default_policy() | SNP_POLICY_DBG);
+
+	return 0;
+}
-- 
2.48.1


  parent reply	other threads:[~2026-01-05  6:37 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-01-05  6:36 [PATCH v5 0/8] KVM: SVM: Add Page Modification Logging (PML) support Nikunj A Dadhania
2026-01-05  6:36 ` [PATCH v5 1/8] KVM: x86: Carve out PML flush routine Nikunj A Dadhania
2026-01-12 10:02   ` Huang, Kai
2026-01-14 13:57     ` Nikunj A. Dadhania
2026-01-05  6:36 ` [PATCH v5 2/8] KVM: x86: Move PML page to common vcpu arch structure Nikunj A Dadhania
2026-01-12 10:07   ` Huang, Kai
2026-01-05  6:36 ` [PATCH v5 3/8] KVM: VMX: Use cpu_dirty_log_size instead of enable_pml for PML checks Nikunj A Dadhania
2026-01-05  6:49   ` Gupta, Pankaj
2026-01-05  6:36 ` [PATCH v5 4/8] KVM: x86: Move nested CPU dirty logging logic to common code Nikunj A Dadhania
2026-01-12 10:08   ` Huang, Kai
2026-01-05  6:36 ` [PATCH v5 5/8] x86/cpufeatures: Add Page modification logging Nikunj A Dadhania
2026-01-05  6:36 ` [PATCH v5 6/8] KVM: SVM: Use BIT_ULL for 64-bit nested_ctl bit definitions Nikunj A Dadhania
2026-01-05  6:36 ` [PATCH v5 7/8] KVM: SVM: Add Page modification logging support Nikunj A Dadhania
2026-01-12 10:24   ` Huang, Kai
2026-01-14 14:03     ` Nikunj A. Dadhania
2026-01-14 23:10       ` Huang, Kai
2026-01-14 22:48   ` Huang, Kai
2026-01-16  4:12     ` Nikunj A. Dadhania
2026-01-05  6:36 ` Nikunj A Dadhania [this message]
2026-01-14 11:36   ` [PATCH v5 8/8] selftests: KVM: x86: Add SEV PML dirty logging test Huang, Kai
2026-01-14 14:27     ` Nikunj A. Dadhania
2026-01-14 22:44       ` Huang, Kai

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260105063622.894410-9-nikunj@amd.com \
    --to=nikunj@amd.com \
    --cc=bp@alien8.de \
    --cc=joao.m.martins@oracle.com \
    --cc=kai.huang@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=santosh.shukla@amd.com \
    --cc=seanjc@google.com \
    --cc=thomas.lendacky@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox