* [PATCH v6 1/7] KVM: x86: Carve out PML flush routine
From: Nikunj A Dadhania @ 2026-04-07 6:32 UTC
To: kvm, seanjc, pbonzini
Cc: thomas.lendacky, bp, joao.m.martins, nikunj, kai.huang
Move the PML (Page Modification Logging) buffer flushing logic from
VMX-specific code to common x86 KVM code to enable reuse by SVM and avoid
code duplication.
The AMD SVM PML implementation shares the same behavior as VMX PML:
1) The PML buffer is a 4K page with 512 entries
2) Hardware records dirty GPAs in reverse order (from index 511 to 0)
3) Hardware clears bits 11:0 when recording GPAs
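To make the shared semantics concrete, here is a minimal standalone C
simulation of the flush walk being moved to common code below (the logged
buffer contents are made up for illustration; the constants and the
tail-index computation match this patch):

	#include <stdio.h>
	#include <stdint.h>

	#define PML_LOG_NR_ENTRIES 512
	/* PML is written backwards: entry 511 is the first the CPU writes. */
	#define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES - 1)

	int main(void)
	{
		uint64_t pml_buf[PML_LOG_NR_ENTRIES] = { 0 };
		uint16_t pml_idx = 508; /* hardware logged three GPAs: 511..509 */
		uint16_t pml_tail_index;
		int i;

		/* Simulated log; hardware clears bits 11:0 of each recorded GPA. */
		pml_buf[511] = 0x1000;
		pml_buf[510] = 0x5000;
		pml_buf[509] = 0x9000;

		/* The index points at the next free slot unless the log overflowed. */
		pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;

		/* Consume entries in the order the CPU wrote them: 511 down to tail. */
		for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--)
			printf("dirty gfn: %#llx\n",
			       (unsigned long long)(pml_buf[i] >> 12));

		return 0;
	}

This prints GFNs 0x1, 0x5 and 0x9 in write order, which is the order in
which kvm_vcpu_mark_page_dirty() is invoked by the common helper.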
No functional change intended for VMX, except for toning down the WARN_ON() to
WARN_ON_ONCE() for the page alignment check. If hardware exhibits this
behavior once, it's likely to occur repeatedly, so use WARN_ON_ONCE() to
avoid log flooding while still capturing the unexpected condition.
The refactoring prepares for SVM to leverage the same PML flushing
implementation.
Reviewed-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
arch/x86/kvm/vmx/vmx.c | 26 ++------------------------
arch/x86/kvm/vmx/vmx.h | 5 -----
arch/x86/kvm/x86.c | 31 +++++++++++++++++++++++++++++++
arch/x86/kvm/x86.h | 8 ++++++++
4 files changed, 41 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8b24e682535b..b77750a2efc2 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6502,37 +6502,15 @@ static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u16 pml_idx, pml_tail_index;
- u64 *pml_buf;
- int i;
+ u16 pml_idx;
pml_idx = vmcs_read16(GUEST_PML_INDEX);
/* Do nothing if PML buffer is empty */
if (pml_idx == PML_HEAD_INDEX)
return;
- /*
- * PML index always points to the next available PML buffer entity
- * unless PML log has just overflowed.
- */
- pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;
- /*
- * PML log is written backwards: the CPU first writes the entry 511
- * then the entry 510, and so on.
- *
- * Read the entries in the same order they were written, to ensure that
- * the dirty ring is filled in the same order the CPU wrote them.
- */
- pml_buf = page_address(vmx->pml_pg);
-
- for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
- u64 gpa;
-
- gpa = pml_buf[i];
- WARN_ON(gpa & (PAGE_SIZE - 1));
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
- }
+ kvm_flush_pml_buffer(vcpu, vmx->pml_pg, pml_idx);
/* reset PML index */
vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 70bfe81dea54..9b0c5dde9437 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -262,11 +262,6 @@ struct vcpu_vmx {
unsigned int ple_window;
bool ple_window_dirty;
- /* Support for PML */
-#define PML_LOG_NR_ENTRIES 512
- /* PML is written backwards: this is the first entry written by the CPU */
-#define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)
-
struct page *pml_pg;
/* apic deadline value in host tsc */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fd1c4a36b593..628b6f51d2be 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6720,6 +6720,37 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
kvm_vcpu_kick(vcpu);
}
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_page, u16 pml_idx)
+{
+ u16 pml_tail_index;
+ u64 *pml_buf;
+ int i;
+
+ /*
+ * PML index always points to the next available PML buffer entity
+ * unless PML log has just overflowed.
+ */
+ pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;
+
+ /*
+ * PML log is written backwards: the CPU first writes the entry 511
+ * then the entry 510, and so on.
+ *
+ * Read the entries in the same order they were written, to ensure that
+ * the dirty ring is filled in the same order the CPU wrote them.
+ */
+ pml_buf = page_address(pml_page);
+
+ for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
+ u64 gpa;
+
+ gpa = pml_buf[i];
+ WARN_ON_ONCE(gpa & (PAGE_SIZE - 1));
+ kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
+ }
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_flush_pml_buffer);
+
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 94d4f07aaaa0..3b2cc2756033 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -763,4 +763,12 @@ static inline bool kvm_is_valid_u_s_cet(struct kvm_vcpu *vcpu, u64 data)
return true;
}
+
+/* Support for PML */
+#define PML_LOG_NR_ENTRIES 512
+/* PML is written backwards: this is the first entry written by the CPU */
+#define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)
+
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_pg, u16 pml_idx);
+
#endif
--
2.48.1
* [PATCH v6 2/7] KVM: x86: Move PML page to common vcpu arch structure
From: Nikunj A Dadhania @ 2026-04-07 6:32 UTC
To: kvm, seanjc, pbonzini
Cc: thomas.lendacky, bp, joao.m.martins, nikunj, kai.huang
Move the PML page pointer from VMX-specific vcpu_vmx structure to the
common kvm_vcpu_arch structure to enable sharing between VMX and SVM
implementations. Only the page pointer is moved to x86 common code while
keeping allocation logic vendor-specific, since AMD requires
snp_safe_alloc_page() for PML buffer allocation.
Update all VMX references accordingly, and simplify the
kvm_flush_pml_buffer() interface by removing the page parameter since it
can now access the page directly from the vcpu structure.
No functional change intended; this restructuring prepares for SVM PML support.
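The resulting split of responsibilities can be sketched as follows (not a
literal hunk from this patch; the SVM side lands later in the series):

	/* VMX: an ordinary zeroed page is sufficient. */
	vcpu->arch.pml_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	/* SVM: the PML buffer must be an SNP-safe page. */
	vcpu->arch.pml_page = snp_safe_alloc_page();

Either way, common code only ever dereferences vcpu->arch.pml_page.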
Suggested-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/vmx/vmx.c | 24 ++++++++++++------------
arch/x86/kvm/vmx/vmx.h | 2 --
arch/x86/kvm/x86.c | 4 ++--
arch/x86/kvm/x86.h | 2 +-
5 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6e4e3ef9b8c7..9ed19e99d481 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -873,6 +873,8 @@ struct kvm_vcpu_arch {
*/
struct kvm_mmu_memory_cache mmu_external_spt_cache;
+ struct page *pml_page;
+
/*
* QEMU userspace and the guest each have their own FPU state.
* In vcpu_run, we switch between the user and guest FPU contexts.
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b77750a2efc2..909085a5dfb3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4934,7 +4934,8 @@ int vmx_vcpu_precreate(struct kvm *kvm)
static void init_vmcs(struct vcpu_vmx *vmx)
{
- struct kvm *kvm = vmx->vcpu.kvm;
+ struct kvm_vcpu *vcpu = &vmx->vcpu;
+ struct kvm *kvm = vcpu->kvm;
struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
if (nested)
@@ -5026,7 +5027,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
if (enable_pml) {
- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write64(PML_ADDRESS, page_to_phys(vcpu->arch.pml_page));
vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
}
@@ -6491,17 +6492,16 @@ void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
*error_code = 0;
}
-static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
+static void vmx_destroy_pml_buffer(struct kvm_vcpu *vcpu)
{
- if (vmx->pml_pg) {
- __free_page(vmx->pml_pg);
- vmx->pml_pg = NULL;
+ if (vcpu->arch.pml_page) {
+ __free_page(vcpu->arch.pml_page);
+ vcpu->arch.pml_page = NULL;
}
}
static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
u16 pml_idx;
pml_idx = vmcs_read16(GUEST_PML_INDEX);
@@ -6510,7 +6510,7 @@ static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
if (pml_idx == PML_HEAD_INDEX)
return;
- kvm_flush_pml_buffer(vcpu, vmx->pml_pg, pml_idx);
+ kvm_flush_pml_buffer(vcpu, pml_idx);
/* reset PML index */
vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
@@ -7738,7 +7738,7 @@ void vmx_vcpu_free(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (enable_pml)
- vmx_destroy_pml_buffer(vmx);
+ vmx_destroy_pml_buffer(vcpu);
free_vpid(vmx->vpid);
nested_vmx_free_vcpu(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
@@ -7767,8 +7767,8 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
* for the guest), etc.
*/
if (enable_pml) {
- vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
- if (!vmx->pml_pg)
+ vcpu->arch.pml_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!vcpu->arch.pml_page)
goto free_vpid;
}
@@ -7839,7 +7839,7 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
free_vmcs:
free_loaded_vmcs(vmx->loaded_vmcs);
free_pml:
- vmx_destroy_pml_buffer(vmx);
+ vmx_destroy_pml_buffer(vcpu);
free_vpid:
free_vpid(vmx->vpid);
return err;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 9b0c5dde9437..ba04d32a1412 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -262,8 +262,6 @@ struct vcpu_vmx {
unsigned int ple_window;
bool ple_window_dirty;
- struct page *pml_pg;
-
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 628b6f51d2be..52c06d095c25 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6720,7 +6720,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
kvm_vcpu_kick(vcpu);
}
-void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_page, u16 pml_idx)
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, u16 pml_idx)
{
u16 pml_tail_index;
u64 *pml_buf;
@@ -6739,7 +6739,7 @@ void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_page, u16 pml_
* Read the entries in the same order they were written, to ensure that
* the dirty ring is filled in the same order the CPU wrote them.
*/
- pml_buf = page_address(pml_page);
+ pml_buf = page_address(vcpu->arch.pml_page);
for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
u64 gpa;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 3b2cc2756033..21d88a768d8e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -769,6 +769,6 @@ static inline bool kvm_is_valid_u_s_cet(struct kvm_vcpu *vcpu, u64 data)
/* PML is written backwards: this is the first entry written by the CPU */
#define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)
-void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_pg, u16 pml_idx);
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, u16 pml_idx);
#endif
--
2.48.1
* [PATCH v6 3/7] KVM: VMX: Use cpu_dirty_log_size instead of enable_pml for PML checks
From: Nikunj A Dadhania @ 2026-04-07 6:32 UTC
To: kvm, seanjc, pbonzini
Cc: thomas.lendacky, bp, joao.m.martins, nikunj, kai.huang,
Pankaj Gupta
Replace the enable_pml check with cpu_dirty_log_size in VMX PML code
to determine whether PML is enabled on a per-VM basis. The enable_pml
module parameter is a global setting that doesn't reflect per-VM
capabilities, whereas cpu_dirty_log_size accurately indicates whether
a specific VM has PML enabled.
For example, TDX VMs don't yet support PML. Using cpu_dirty_log_size
ensures the check correctly reflects this, while enable_pml would
incorrectly indicate PML is available.
This also improves consistency with kvm_mmu_update_cpu_dirty_logging(),
which already uses cpu_dirty_log_size to determine PML enablement.
Suggested-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
arch/x86/kvm/vmx/vmx.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 909085a5dfb3..a5a8bb5a7020 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8439,7 +8439,7 @@ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (WARN_ON_ONCE(!enable_pml))
+ if (WARN_ON_ONCE(!vcpu->kvm->arch.cpu_dirty_log_size))
return;
guard(vmx_vmcs01)(vcpu);
--
2.48.1
* [PATCH v6 4/7] x86/cpufeatures: Add Page modification logging
From: Nikunj A Dadhania @ 2026-04-07 6:32 UTC
To: kvm, seanjc, pbonzini
Cc: thomas.lendacky, bp, joao.m.martins, nikunj, kai.huang
Page Modification Logging (PML) is a hardware feature designed to track
guest-modified memory pages. PML enables the hypervisor to identify which
pages in a guest's memory have been changed since the last checkpoint or
during live migration.
The PML feature is advertised via CPUID leaf 0x8000000A, ECX bit 4.
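For reference, the bit can be probed from userspace with a short,
illustrative program (not part of this patch; the leaf and bit position
are the ones added to scattered.c below):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID leaf 0x8000000A: SVM feature identification. */
		if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx))
			return 1;

		/* PML is advertised in ECX[4]. */
		printf("PML %ssupported\n", (ecx & (1u << 4)) ? "" : "not ");
		return 0;
	}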
Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/kernel/cpu/scattered.c | 1 +
2 files changed, 2 insertions(+)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dbe104df339b..8c669c9b11d0 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -228,6 +228,7 @@
#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* PV unlock function */
#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* PV vcpu_is_preempted function */
#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* "tdx_guest" Intel Trust Domain Extensions Guest */
+#define X86_FEATURE_PML ( 8*32+23) /* AMD Page Modification logging */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* "fsgsbase" RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 42c7eac0c387..cdda4e72c5e6 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -53,6 +53,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_PML, CPUID_ECX, 4, 0x8000000a, 0 },
{ X86_FEATURE_X2AVIC_EXT, CPUID_ECX, 6, 0x8000000a, 0 },
{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
--
2.48.1
* [PATCH v6 5/7] KVM: SVM: Use BIT_ULL for 64-bit nested_ctl bit definitions
From: Nikunj A Dadhania @ 2026-04-07 6:32 UTC
To: kvm, seanjc, pbonzini
Cc: thomas.lendacky, bp, joao.m.martins, nikunj, kai.huang
Replace BIT() with BIT_ULL() for SVM nested control bit definitions
since nested_ctl is a 64-bit field in the VMCB control area structure.
No functional change intended.
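On 64-bit kernels this is purely a type cleanup (BIT() expands to
1UL << n), but the distinction is easy to demonstrate in a standalone
program (macros mirrored locally rather than taken from kernel headers):

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(nr)     (1UL << (nr))  /* width of unsigned long */
	#define BIT_ULL(nr) (1ULL << (nr)) /* always 64-bit */

	int main(void)
	{
		uint64_t nested_ctl = 0;

		/* Low bits such as NP_ENABLE (bit 0) work either way... */
		nested_ctl |= BIT_ULL(0);

		/*
		 * ...but where unsigned long is 32 bits, BIT(40) would shift a
		 * 32-bit value by 40, which is undefined behavior; BIT_ULL(40)
		 * is well-defined everywhere.
		 */
		nested_ctl |= BIT_ULL(40);

		printf("nested_ctl = %#llx\n", (unsigned long long)nested_ctl);
		return 0;
	}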
Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
arch/x86/include/asm/svm.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index edde36097ddc..8fe91a0651bc 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -239,9 +239,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
-#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
-#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
-#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
+#define SVM_NESTED_CTL_NP_ENABLE BIT_ULL(0)
+#define SVM_NESTED_CTL_SEV_ENABLE BIT_ULL(1)
+#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT_ULL(2)
#define SVM_TSC_RATIO_RSVD 0xffffff0000000000ULL
--
2.48.1
* [PATCH v6 6/7] KVM: nSVM: Add helpers to temporarily switch to vmcb01
From: Nikunj A Dadhania @ 2026-04-07 6:32 UTC
To: kvm, seanjc, pbonzini
Cc: thomas.lendacky, bp, joao.m.martins, nikunj, kai.huang
If KVM needs to update controls that belong in vmcb01 while L2 is active,
it must switch to vmcb01 and update immediately instead of deferring until
the next nested VM-Exit. Deferring updates creates ordering and state
consistency problems, e.g. KVM thinks a control is enabled while vmcb01
still has stale state.
Add helpers to temporarily switch to vmcb01 to ensure updates happen
on-demand.
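A hedged sketch of a caller (the function name here is hypothetical;
patch 7 of this series uses the same guard form in
svm_update_cpu_dirty_logging()):

	/* Update an L1-owned control with vmcb01 guaranteed to be active. */
	static void example_update_vmcb01(struct kvm_vcpu *vcpu)
	{
		guard(svm_vmcb01)(vcpu);

		/* svm->vmcb points at vmcb01 for the rest of this scope. */
		to_svm(vcpu)->vmcb->control.nested_ctl |= SVM_NESTED_CTL_PML_ENABLE;
		vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_NPT);
	}

On scope exit, the guard switches back to vmcb02 if and only if L2 was
active when the guard was taken.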
Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
arch/x86/kvm/svm/svm.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index e6477affac9a..32b002f92528 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1251,6 +1251,30 @@ void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
svm->vmcb = target_vmcb->ptr;
}
+static void svm_load_vmcb01(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (!is_guest_mode(vcpu)) {
+ WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr);
+ return;
+ }
+
+ WARN_ON_ONCE(svm->vmcb != svm->nested.vmcb02.ptr);
+ svm_switch_vmcb(svm, &svm->vmcb01);
+}
+
+static void svm_put_vmcb01(struct kvm_vcpu *vcpu)
+{
+ if (!is_guest_mode(vcpu))
+ return;
+
+ svm_switch_vmcb(to_svm(vcpu), &to_svm(vcpu)->nested.vmcb02);
+}
+
+DEFINE_GUARD(svm_vmcb01, struct kvm_vcpu *,
+ svm_load_vmcb01(_T), svm_put_vmcb01(_T))
+
static int svm_vcpu_precreate(struct kvm *kvm)
{
return avic_alloc_physical_id_table(kvm);
--
2.48.1
* [PATCH v6 7/7] KVM: SVM: Add Page modification logging support
From: Nikunj A Dadhania @ 2026-04-07 6:32 UTC
To: kvm, seanjc, pbonzini
Cc: thomas.lendacky, bp, joao.m.martins, nikunj, kai.huang
Currently, dirty logging relies on write protecting guest memory and
marking dirty GFNs during subsequent write faults. This method works but
incurs overhead due to additional write faults for each dirty GFN.
Implement support for the Page Modification Logging (PML) feature, a
hardware-assisted method for efficient dirty logging. PML automatically
logs dirty GPA[51:12] to a 4K buffer when the CPU sets NPT D-bits. Two new
VMCB fields are utilized: PML_ADDR and PML_INDEX. The PML_INDEX is
initialized to 511 (8 bytes per GPA entry), and the CPU decrements the
PML_INDEX after logging each GPA. When the PML buffer is full, a
VMEXIT(PML_FULL) with exit code 0x407 is generated.
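The index arithmetic implied here can be spelled out with a small helper
(hypothetical name, shown only to make the semantics explicit; it mirrors
the tail computation in kvm_flush_pml_buffer()):

	/* How many entries did hardware log, given the current PML_INDEX? */
	static inline unsigned int pml_entries_logged(u16 pml_index)
	{
		/* An index >= 512 means the log just overflowed: all are valid. */
		if (pml_index >= PML_LOG_NR_ENTRIES)
			return PML_LOG_NR_ENTRIES;

		/* Otherwise entries PML_HEAD_INDEX down to (pml_index + 1). */
		return PML_HEAD_INDEX - pml_index;
	}

An empty buffer reports 0 (pml_index == 511) and a just-overflowed buffer
reports all 512 entries.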
PML operates on guest physical addresses at the NPT level, tracking D-bit
updates in page tables rather than memory content. This allows it to work
identically for normal and confidential computing guests
(SEV/SEV-ES/SEV-SNP), enabling cpu_dirty_log_size to be set uniformly for
all AMD VMs without special-casing encrypted guests.
When L2 is active, svm->vmcb points to vmcb02, so updates to PML controls
must explicitly target vmcb01 so that L1's state remains correct. Use the
svm_vmcb01 guard to keep vmcb01 active for the duration of the update.
Disable PML for nested guests.
Add a new module parameter to enable/disable PML, and enable it by default
when supported.
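(With the read-only module_param added below, that is kvm-amd.pml=0 on the
kernel command line or "modprobe kvm_amd pml=0" at load time, and the
effective value is readable from /sys/module/kvm_amd/parameters/pml; these
spellings follow standard module-parameter conventions rather than anything
this patch documents explicitly.)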
Acked-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
---
arch/x86/include/asm/svm.h | 6 +-
arch/x86/include/uapi/asm/svm.h | 2 +
arch/x86/kvm/svm/nested.c | 13 +++-
arch/x86/kvm/svm/sev.c | 2 +-
arch/x86/kvm/svm/svm.c | 101 +++++++++++++++++++++++++++++++-
arch/x86/kvm/svm/svm.h | 3 +
6 files changed, 122 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 8fe91a0651bc..d52096d3eaa2 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -165,7 +165,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u8 reserved_9[22];
u64 allowed_sev_features; /* Offset 0x138 */
u64 guest_sev_features; /* Offset 0x140 */
- u8 reserved_10[664];
+ u8 reserved_10[128];
+ u64 pml_addr; /* Offset 0x1c8 */
+ u16 pml_index; /* Offset 0x1d0 */
+ u8 reserved_11[526];
/*
* Offset 0x3e0, 32 bytes reserved
* for use by hypervisor/software.
@@ -242,6 +245,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_NP_ENABLE BIT_ULL(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT_ULL(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT_ULL(2)
+#define SVM_NESTED_CTL_PML_ENABLE BIT_ULL(11)
#define SVM_TSC_RATIO_RSVD 0xffffff0000000000ULL
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 010a45c9f614..e80676185092 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -101,6 +101,7 @@
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
#define SVM_EXIT_VMGEXIT 0x403
+#define SVM_EXIT_PML_FULL 0x407
/* SEV-ES software-defined VMGEXIT events */
#define SVM_VMGEXIT_MMIO_READ 0x80000001ull
@@ -236,6 +237,7 @@
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \
{ SVM_EXIT_VMGEXIT, "vmgexit" }, \
+ { SVM_EXIT_PML_FULL, "pml_full" }, \
{ SVM_VMGEXIT_MMIO_READ, "vmgexit_mmio_read" }, \
{ SVM_VMGEXIT_MMIO_WRITE, "vmgexit_mmio_write" }, \
{ SVM_VMGEXIT_NMI_COMPLETE, "vmgexit_nmi_complete" }, \
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b36c33255bed..9afe4d86793b 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -790,12 +790,23 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
V_NMI_BLOCKING_MASK);
}
- /* Copied from vmcb01. msrpm_base can be overwritten later. */
+ /* Copied from vmcb01. msrpm_base/nested_ctl can be overwritten later. */
vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
vmcb_mark_dirty(vmcb02, VMCB_PERM_MAP);
+ /*
+ * Disable PML for nested guests. When L2 runs with PML enabled, the
+ * CPU logs L2 GPAs rather than L1 GPAs, breaking dirty page tracking
+ * for the L0 hypervisor.
+ */
+ if (pml) {
+ vmcb02->control.nested_ctl &= ~SVM_NESTED_CTL_PML_ENABLE;
+ vmcb02->control.pml_addr = 0;
+ vmcb02->control.pml_index = -1;
+ }
+
/*
* Stash vmcb02's counter if the guest hasn't moved past the guilty
* instruction; otherwise, reset the counter to '0'.
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 3f9c1aa39a0a..427de45843f2 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4820,7 +4820,7 @@ struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
* Allocate an SNP-safe page to workaround the SNP erratum where
* the CPU will incorrectly signal an RMP violation #PF if a
* hugepage (2MB or 1GB) collides with the RMP entry of a
- * 2MB-aligned VMCB, VMSA, or AVIC backing page.
+ * 2MB-aligned VMCB, VMSA, PML or AVIC backing page.
*
* Allocate one extra page, choose a page which is not
* 2MB-aligned, and free the other.
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 32b002f92528..f7a5a10f50a2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -172,6 +172,9 @@ module_param(vnmi, bool, 0444);
module_param(enable_mediated_pmu, bool, 0444);
+bool pml = true;
+module_param(pml, bool, 0444);
+
static bool svm_gp_erratum_intercept = true;
static u8 rsm_ins_bytes[] = "\x0f\xaa";
@@ -1206,6 +1209,16 @@ static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
if (vcpu->kvm->arch.bus_lock_detection_enabled)
svm_set_intercept(svm, INTERCEPT_BUSLOCK);
+ if (pml) {
+ /*
+ * Populate the page address and index here, PML is enabled
+ * when dirty logging is enabled on the memslot through
+ * svm_update_cpu_dirty_logging()
+ */
+ control->pml_addr = (u64)__sme_set(page_to_phys(vcpu->arch.pml_page));
+ control->pml_index = PML_HEAD_INDEX;
+ }
+
if (sev_guest(vcpu->kvm))
sev_init_vmcb(svm, init_event);
@@ -1294,9 +1307,15 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
if (!vmcb01_page)
goto out;
+ if (pml) {
+ vcpu->arch.pml_page = snp_safe_alloc_page();
+ if (!vcpu->arch.pml_page)
+ goto error_free_vmcb_page;
+ }
+
err = sev_vcpu_create(vcpu);
if (err)
- goto error_free_vmcb_page;
+ goto error_free_pml_page;
err = avic_init_vcpu(svm);
if (err)
@@ -1321,6 +1340,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
error_free_sev:
sev_free_vcpu(vcpu);
+error_free_pml_page:
+ if (vcpu->arch.pml_page)
+ __free_page(vcpu->arch.pml_page);
error_free_vmcb_page:
__free_page(vmcb01_page);
out:
@@ -1338,6 +1360,9 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
sev_free_vcpu(vcpu);
+ if (pml && vcpu->arch.pml_page)
+ __free_page(vcpu->arch.pml_page);
+
__free_page(__sme_pa_to_page(svm->vmcb01.pa));
svm_vcpu_free_msrpm(svm->msrpm);
}
@@ -3253,6 +3278,54 @@ static int bus_lock_exit(struct kvm_vcpu *vcpu)
return 0;
}
+void svm_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (WARN_ON_ONCE(!vcpu->kvm->arch.cpu_dirty_log_size))
+ return;
+
+ guard(svm_vmcb01)(vcpu);
+
+ /*
+ * Note, nr_memslots_dirty_logging can be changed concurrent with this
+ * code, but in that case another update request will be made and so
+ * the guest will never run with a stale PML value.
+ */
+ if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
+ svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_PML_ENABLE;
+ else
+ svm->vmcb->control.nested_ctl &= ~SVM_NESTED_CTL_PML_ENABLE;
+
+ vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
+}
+
+static void svm_flush_pml_buffer(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_control_area *control = &svm->vmcb->control;
+
+ /* Do nothing if PML buffer is empty */
+ if (control->pml_index == PML_HEAD_INDEX)
+ return;
+
+ kvm_flush_pml_buffer(vcpu, control->pml_index);
+
+ /* Reset the PML index */
+ control->pml_index = PML_HEAD_INDEX;
+}
+
+static int pml_full_interception(struct kvm_vcpu *vcpu)
+{
+ trace_kvm_pml_full(vcpu->vcpu_id);
+
+ /*
+ * PML buffer is already flushed at the beginning of svm_handle_exit().
+ * Nothing to do here.
+ */
+ return 1;
+}
+
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception,
@@ -3329,6 +3402,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
#ifdef CONFIG_KVM_AMD_SEV
[SVM_EXIT_VMGEXIT] = sev_handle_vmgexit,
#endif
+ [SVM_EXIT_PML_FULL] = pml_full_interception,
};
static void dump_vmcb(struct kvm_vcpu *vcpu)
@@ -3378,8 +3452,14 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
- pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
+ pr_err("%-20s%llx\n", "nested_ctl:", control->nested_ctl);
pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
+
+ if (pml) {
+ pr_err("%-20s%016llx\n", "pml_addr:", control->pml_addr);
+ pr_err("%-20s%04x\n", "pml_index:", control->pml_index);
+ }
+
pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
@@ -3625,6 +3705,14 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_run *kvm_run = vcpu->run;
+ /*
+ * Opportunistically flush the PML buffer on VM exit. This keeps the
+ * dirty bitmap current by processing logged GPAs rather than waiting for
+ * PML_FULL exit.
+ */
+ if (vcpu->kvm->arch.cpu_dirty_log_size && !is_guest_mode(vcpu))
+ svm_flush_pml_buffer(vcpu);
+
/* SEV-ES guests must use the CR write traps to track CR registers. */
if (!sev_es_guest(vcpu->kvm)) {
if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
@@ -5135,6 +5223,9 @@ static int svm_vm_init(struct kvm *kvm)
return ret;
}
+ if (pml)
+ kvm->arch.cpu_dirty_log_size = PML_LOG_NR_ENTRIES;
+
svm_srso_vm_init();
return 0;
}
@@ -5289,6 +5380,8 @@ struct kvm_x86_ops svm_x86_ops __initdata = {
.gmem_prepare = sev_gmem_prepare,
.gmem_invalidate = sev_gmem_invalidate,
.gmem_max_mapping_level = sev_gmem_max_mapping_level,
+
+ .update_cpu_dirty_logging = svm_update_cpu_dirty_logging,
};
/*
@@ -5515,6 +5608,10 @@ static __init int svm_hardware_setup(void)
nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
+ pml = pml && npt_enabled && cpu_feature_enabled(X86_FEATURE_PML);
+ if (pml)
+ pr_info("Page modification logging supported\n");
+
if (lbrv) {
if (!boot_cpu_has(X86_FEATURE_LBRV))
lbrv = false;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 6942e6b0eda6..88de4d6cd9bb 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -50,6 +50,7 @@ extern int vgif;
extern bool intercept_smi;
extern bool vnmi;
extern int lbrv;
+extern bool pml;
extern int tsc_aux_uret_slot __ro_after_init;
@@ -746,6 +747,8 @@ static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
svm_set_intercept_for_msr(vcpu, msr, type, true);
}
+void svm_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+
/* nested.c */
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
--
2.48.1