From: Sean Christopherson <seanjc@google.com>
To: Nikunj A Dadhania <nikunj@amd.com>
Cc: kvm@vger.kernel.org, pbonzini@redhat.com,
thomas.lendacky@amd.com, bp@alien8.de,
joao.m.martins@oracle.com, kai.huang@intel.com
Subject: Re: [PATCH v6 2/7] KVM: x86: Move PML page to common vcpu arch structure
Date: Fri, 8 May 2026 15:57:24 -0700 [thread overview]
Message-ID: <af5qVIXf06WyK4XG@google.com> (raw)
In-Reply-To: <20260407063245.2755579-3-nikunj@amd.com>
On Tue, Apr 07, 2026, Nikunj A Dadhania wrote:
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index b77750a2efc2..909085a5dfb3 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -4934,7 +4934,8 @@ int vmx_vcpu_precreate(struct kvm *kvm)
>
> static void init_vmcs(struct vcpu_vmx *vmx)
> {
> - struct kvm *kvm = vmx->vcpu.kvm;
> + struct kvm_vcpu *vcpu = &vmx->vcpu;
> + struct kvm *kvm = vcpu->kvm;
There are 9+ existing uses of "vmx->vcpu" in init_vmcs(). I'd rather insert a
prep patch that passes @vcpu instead of @vmx, does to_vmx() at the top, and
switches all existing vmx->vcpu usage.
From: Sean Christopherson <seanjc@google.com>
Date: Fri, 8 May 2026 15:55:55 -0700
Subject: [PATCH] KVM: VMX: Pass @vcpu, not @vmx to init_vmcs()
Pass @vcpu instead of @vmx to init_vmcs(), and switch all of the vmx->vcpu
usage to a simple vcpu.
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/vmx/vmx.c | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c2c33a5f7dc..b8d517b03bb4 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4823,10 +4823,11 @@ int vmx_vcpu_precreate(struct kvm *kvm)
#define VMX_XSS_EXIT_BITMAP 0
-static void init_vmcs(struct vcpu_vmx *vmx)
+static void init_vmcs(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vmx->vcpu.kvm;
+ struct kvm *kvm = vcpu->kvm;
struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
if (nested)
nested_vmx_set_vmcs_shadowing_bitmap();
@@ -4851,7 +4852,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
if (cpu_has_tertiary_exec_ctrls())
tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
- if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
+ if (enable_apicv && lapic_in_kernel(vcpu)) {
vmcs_write64(EOI_EXIT_BITMAP0, 0);
vmcs_write64(EOI_EXIT_BITMAP1, 0);
vmcs_write64(EOI_EXIT_BITMAP2, 0);
@@ -4863,7 +4864,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->vt.pi_desc)));
}
- if (vmx_can_use_ipiv(&vmx->vcpu)) {
+ if (vmx_can_use_ipiv(vcpu)) {
vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
}
@@ -4898,15 +4899,15 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
- vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+ vmcs_write64(GUEST_IA32_PAT, vcpu->arch.pat);
vm_exit_controls_set(vmx, vmx_get_initial_vmexit_ctrl());
/* 22.2.1, 20.8.1 */
vm_entry_controls_set(vmx, vmx_get_initial_vmentry_ctrl());
- vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
- vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
+ vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
set_cr4_guest_host_mask(vmx);
@@ -4921,7 +4922,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
}
- vmx_write_encls_bitmap(&vmx->vcpu, NULL);
+ vmx_write_encls_bitmap(vcpu, NULL);
if (vmx_pt_mode_is_host_guest()) {
memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
@@ -4934,13 +4935,13 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_writel(GUEST_SYSENTER_ESP, 0);
vmcs_writel(GUEST_SYSENTER_EIP, 0);
- vmx_guest_debugctl_write(&vmx->vcpu, 0);
+ vmx_guest_debugctl_write(vcpu, 0);
if (cpu_has_vmx_tpr_shadow()) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
- if (cpu_need_tpr_shadow(&vmx->vcpu))
+ if (cpu_need_tpr_shadow(vcpu))
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
- __pa(vmx->vcpu.arch.apic->regs));
+ __pa(vcpu->arch.apic->regs));
vmcs_write32(TPR_THRESHOLD, 0);
}
@@ -4951,7 +4952,7 @@ static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- init_vmcs(vmx);
+ init_vmcs(vcpu);
if (nested &&
kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
base-commit: 4c88d35f51436169884ce87bcf26f31e10106e71
--
next prev parent reply other threads:[~2026-05-08 22:57 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-07 6:32 [PATCH v6 0/7] KVM: SVM: Add Page Modification Logging (PML) support Nikunj A Dadhania
2026-04-07 6:32 ` [PATCH v6 1/7] KVM: x86: Carve out PML flush routine Nikunj A Dadhania
2026-05-08 22:52 ` Sean Christopherson
2026-05-12 5:36 ` Nikunj A. Dadhania
2026-04-07 6:32 ` [PATCH v6 2/7] KVM: x86: Move PML page to common vcpu arch structure Nikunj A Dadhania
2026-05-08 22:57 ` Sean Christopherson [this message]
2026-05-12 5:45 ` Nikunj A. Dadhania
2026-04-07 6:32 ` [PATCH v6 3/7] KVM: VMX: Use cpu_dirty_log_size instead of enable_pml for PML checks Nikunj A Dadhania
2026-04-07 6:32 ` [PATCH v6 4/7] x86/cpufeatures: Add Page modification logging Nikunj A Dadhania
2026-04-07 6:32 ` [PATCH v6 5/7] KVM: SVM: Use BIT_ULL for 64-bit nested_ctl bit definitions Nikunj A Dadhania
2026-04-07 6:32 ` [PATCH v6 6/7] KVM: nSVM: Add helpers to temporarily switch to vmcb01 Nikunj A Dadhania
2026-04-07 6:32 ` [PATCH v6 7/7] KVM: SVM: Add Page modification logging support Nikunj A Dadhania
2026-04-20 6:38 ` Nikunj A. Dadhania
2026-04-21 15:08 ` Sean Christopherson
2026-04-21 23:50 ` Huang, Kai
2026-04-22 0:30 ` Sean Christopherson
2026-04-22 1:42 ` Huang, Kai
2026-04-22 5:59 ` Nikunj A. Dadhania
2026-04-22 8:14 ` Huang, Kai
2026-04-22 13:20 ` Sean Christopherson
2026-04-22 22:14 ` Huang, Kai
2026-04-24 16:25 ` Tom Lendacky
2026-04-25 14:45 ` Tom Lendacky
2026-04-27 20:16 ` Sean Christopherson
2026-04-21 23:04 ` Yosry Ahmed
2026-04-21 23:15 ` Sean Christopherson
2026-04-22 6:26 ` Nikunj A. Dadhania
2026-04-22 19:48 ` Yosry Ahmed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=af5qVIXf06WyK4XG@google.com \
--to=seanjc@google.com \
--cc=bp@alien8.de \
--cc=joao.m.martins@oracle.com \
--cc=kai.huang@intel.com \
--cc=kvm@vger.kernel.org \
--cc=nikunj@amd.com \
--cc=pbonzini@redhat.com \
--cc=thomas.lendacky@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox