From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: jon@nutanix.com, mtosatti@redhat.com
Subject: [PATCH 15/22] KVM: x86/mmu: change nested_mmu.w to nested_cpu_walk
Date: Mon, 11 May 2026 11:06:41 -0400
Message-ID: <20260511150648.685374-16-pbonzini@redhat.com>
In-Reply-To: <20260511150648.685374-1-pbonzini@redhat.com>
nested_mmu is now only used for its w member, i.e. the embedded
struct kvm_pagewalk; the rest of struct kvm_mmu is dead weight.
Rename the field to nested_cpu_walk and change its type from
struct kvm_mmu to struct kvm_pagewalk accordingly.
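For reference, the resulting layout in struct kvm_vcpu_arch is roughly
the following (a sketch only; the authoritative change is in the diff
below, and struct kvm_pagewalk itself was introduced in patch 05/22):

	struct kvm_vcpu_arch {
		...
		/* Walker used to translate L2 GVAs to L1 GPAs. */
		struct kvm_pagewalk nested_cpu_walk;

		/* Pagewalk context used for gva_to_gpa translations. */
		struct kvm_pagewalk *cpu_walk;
		...
	};

The walker carries the fields this patch touches: cpu_role,
get_guest_pgd, get_pdptr, gva_to_gpa and inject_page_fault.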
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  5 ++--
 arch/x86/kvm/mmu.h              |  6 ++---
 arch/x86/kvm/mmu/mmu.c          | 41 ++++++++++++++-------------------
 arch/x86/kvm/svm/nested.c       |  2 +-
 arch/x86/kvm/vmx/nested.c       |  2 +-
5 files changed, 24 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6c5c59b9cfe3..8af8016e9364 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -873,11 +873,10 @@ struct kvm_vcpu_arch {
* walking and not for faulting since we never handle l2 page faults on
* the host.
*/
- struct kvm_mmu nested_mmu;
+ struct kvm_pagewalk nested_cpu_walk;
/*
- * Pointer to the mmu context currently used for
- * gva_to_gpa translations.
+ * Pagewalk context used for gva_to_gpa translations.
*/
struct kvm_pagewalk *cpu_walk;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d1b5d9b0c6ad..652803cb36c8 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -177,8 +177,8 @@ static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
* be stale. Refresh CR0.WP and the metadata on-demand when checking
* for permission faults. Exempt nested MMUs, i.e. MMUs for shadowing
* nEPT and nNPT, as CR0.WP is ignored in both cases. Note, KVM does
- * need to refresh nested_mmu, a.k.a. the walker used to translate L2
- * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
+ * need to refresh nested_cpu_walk, a.k.a. the walker used to translate L2
+ * GVAs to GPAs, so as to honor L2's CR0.WP.
*/
if (!tdp_enabled || w == &vcpu->arch.guest_mmu.w)
return;
@@ -306,7 +306,7 @@ static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
struct x86_exception *exception,
u64 pte_access)
{
- if (w != &vcpu->arch.nested_mmu.w)
+ if (w != &vcpu->arch.nested_cpu_walk)
return gpa;
return kvm_x86_ops.nested_ops->translate_nested_gpa(vcpu, gpa, access,
exception,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d6a011b2d36e..bb76835a2e06 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6037,43 +6037,37 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
context->w.get_guest_pgd = get_guest_cr3;
}
-static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
+static void init_kvm_nested_cpu_walk(struct kvm_vcpu *vcpu,
union kvm_cpu_role new_mode)
{
- struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+ struct kvm_pagewalk *g_context = &vcpu->arch.nested_cpu_walk;
- if (new_mode.as_u64 == g_context->w.cpu_role.as_u64)
+ if (new_mode.as_u64 == g_context->cpu_role.as_u64)
return;
- g_context->w.cpu_role.as_u64 = new_mode.as_u64;
- g_context->w.inject_page_fault = kvm_inject_page_fault;
- g_context->w.get_pdptr = kvm_pdptr_read;
- g_context->w.get_guest_pgd = get_guest_cr3;
-
- /*
- * L2 page tables are never shadowed, so there is no need to sync
- * SPTEs.
- */
- g_context->sync_spte = NULL;
+ g_context->cpu_role.as_u64 = new_mode.as_u64;
+ g_context->inject_page_fault = kvm_inject_page_fault;
+ g_context->get_pdptr = kvm_pdptr_read;
+ g_context->get_guest_pgd = get_guest_cr3;
/*
* Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
* L1's nested page tables (e.g. EPT12). The nested translation
- * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
+ * of l2_gva to l1_gpa is done by arch.nested_cpu_walk.gva_to_gpa using
* L2's page tables as the first level of translation and L1's
* nested page tables as the second level of translation. Basically
- * the gva_to_gpa functions between mmu and nested_mmu are swapped.
+ * the gva_to_gpa functions between mmu and nested_cpu_walk are swapped.
*/
if (!is_paging(vcpu))
- g_context->w.gva_to_gpa = nonpaging_gva_to_gpa;
+ g_context->gva_to_gpa = nonpaging_gva_to_gpa;
else if (is_long_mode(vcpu))
- g_context->w.gva_to_gpa = paging64_gva_to_gpa;
+ g_context->gva_to_gpa = paging64_gva_to_gpa;
else if (is_pae(vcpu))
- g_context->w.gva_to_gpa = paging64_gva_to_gpa;
+ g_context->gva_to_gpa = paging64_gva_to_gpa;
else
- g_context->w.gva_to_gpa = paging32_gva_to_gpa;
+ g_context->gva_to_gpa = paging32_gva_to_gpa;
- reset_guest_paging_metadata(vcpu, &g_context->w);
+ reset_guest_paging_metadata(vcpu, g_context);
}
void kvm_init_mmu(struct kvm_vcpu *vcpu)
@@ -6082,7 +6076,7 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu)
union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
if (mmu_is_nested(vcpu))
- init_kvm_nested_mmu(vcpu, cpu_role);
+ init_kvm_nested_cpu_walk(vcpu, cpu_role);
else if (tdp_enabled)
init_kvm_tdp_mmu(vcpu, cpu_role);
else
@@ -6106,10 +6100,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
vcpu->arch.root_mmu.root_role.invalid = 1;
vcpu->arch.guest_mmu.root_role.invalid = 1;
- vcpu->arch.nested_mmu.root_role.invalid = 1;
vcpu->arch.root_mmu.w.cpu_role.ext.valid = 0;
vcpu->arch.guest_mmu.w.cpu_role.ext.valid = 0;
- vcpu->arch.nested_mmu.w.cpu_role.ext.valid = 0;
+ vcpu->arch.nested_cpu_walk.cpu_role.ext.valid = 0;
kvm_mmu_reset_context(vcpu);
KVM_BUG_ON(!kvm_can_set_cpuid_and_feature_msrs(vcpu), vcpu->kvm);
@@ -6611,7 +6604,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_pagewalk *w,
return;
kvm_x86_call(flush_tlb_gva)(vcpu, addr);
- if (w == &vcpu->arch.nested_mmu.w)
+ if (w == &vcpu->arch.nested_cpu_walk)
return;
}
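To restate the comment in init_kvm_nested_cpu_walk() above: when L2 is
running, the two translators compose roughly as follows (a sketch using
the names from this series, not literal kernel code):

	/*
	 * l2_gpa = nested_cpu_walk.gva_to_gpa(l2_gva)
	 *              walks L2's own page tables; every GPA it reads
	 *              (and the final result) is funneled through
	 *              kvm_translate_gpa(), which forwards to
	 *              nested_ops->translate_nested_gpa() to map
	 *              l2_gpa -> l1_gpa via L1's nested page tables.
	 *
	 * arch.mmu's gva_to_gpa, by contrast, walks L1's nested page
	 * tables (e.g. EPT12) directly, translating l2_gpa to l1_gpa.
	 */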
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 4781145faa14..676a49c55f8d 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -102,7 +102,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->w.get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu->w.inject_page_fault = nested_svm_inject_npf_exit;
- vcpu->arch.cpu_walk = &vcpu->arch.nested_mmu.w;
+ vcpu->arch.cpu_walk = &vcpu->arch.nested_cpu_walk;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ed72625005fc..b23900f2f6b4 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -499,7 +499,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->w.inject_page_fault = nested_ept_inject_page_fault;
- vcpu->arch.cpu_walk = &vcpu->arch.nested_mmu.w;
+ vcpu->arch.cpu_walk = &vcpu->arch.nested_cpu_walk;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
--
2.52.0
Thread overview: 23+ messages
2026-05-11 15:06 [RFC PATCH 00/22] KVM: apply chainsaw to struct kvm_mmu Paolo Bonzini
2026-05-11 15:06 ` [PATCH 01/22] KVM: x86: remove nested_mmu from mmu_is_nested() Paolo Bonzini
2026-05-11 15:06 ` [PATCH 02/22] KVM: x86: move pdptrs out of the MMU Paolo Bonzini
2026-05-11 15:06 ` [PATCH 03/22] KVM: x86: check that kvm_handle_invpcid is only invoked with shadow paging Paolo Bonzini
2026-05-11 15:06 ` [PATCH 04/22] KVM: x86/hyperv: remove unnecessary mmu_is_nested() check Paolo Bonzini
2026-05-11 15:06 ` [PATCH 05/22] KVM: x86/mmu: introduce struct kvm_pagewalk Paolo Bonzini
2026-05-11 15:06 ` [PATCH 06/22] KVM: x86/mmu: move get_guest_pgd to " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 07/22] KVM: x86/mmu: move gva_to_gpa " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 08/22] KVM: x86/mmu: move get_pdptr " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 09/22] KVM: x86/mmu: move inject_page_fault " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 10/22] KVM: x86/mmu: move CPU-related fields " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 11/22] KVM: x86/mmu: change CPU-role accessor fields to take " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 12/22] KVM: x86/mmu: move remaining permission fields to " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 13/22] KVM: x86/mmu: pass struct kvm_pagewalk to kvm_mmu_invalidate_addr Paolo Bonzini
2026-05-11 15:06 ` [PATCH 14/22] KVM: x86/mmu: change walk_mmu to struct kvm_pagewalk Paolo Bonzini
2026-05-11 15:06 ` Paolo Bonzini [this message]
2026-05-11 15:06 ` [PATCH 16/22] KVM: x86/mmu: make cpu_walk a value Paolo Bonzini
2026-05-11 15:06 ` [PATCH 17/22] KVM: x86/mmu: pull struct kvm_pagewalk out of struct kvm_mmu Paolo Bonzini
2026-05-11 15:06 ` [PATCH 18/22] KVM: x86/mmu: cleanup functions that initialize shadow MMU Paolo Bonzini
2026-05-11 15:06 ` [PATCH 19/22] KVM: x86/mmu: pull page format to a new struct Paolo Bonzini
2026-05-11 15:06 ` [PATCH 20/22] KVM: x86/mmu: merge struct rsvd_bits_validate into struct kvm_page_format Paolo Bonzini
2026-05-11 15:06 ` [PATCH 21/22] KVM: x86/mmu: parameterize update_permission_bitmask() Paolo Bonzini
2026-05-11 15:06 ` [PATCH 22/22] KVM: x86/mmu: use kvm_page_format to test SPTEs Paolo Bonzini