From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: jon@nutanix.com, mtosatti@redhat.com
Subject: [PATCH 14/22] KVM: x86/mmu: change walk_mmu to struct kvm_pagewalk
Date: Mon, 11 May 2026 11:06:40 -0400
Message-ID: <20260511150648.685374-15-pbonzini@redhat.com>
In-Reply-To: <20260511150648.685374-1-pbonzini@redhat.com>
Now that walk_mmu is only accessed through its "w" member, store a
pointer to the struct kvm_pagewalk directly and rename the field to
cpu_walk. As a result, nested_mmu is likewise only accessed through
its "w" member.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
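For reviewers, a minimal before/after sketch of a typical call site,
abbreviated from the vcpu_mmio_gva_to_gpa() hunk below (illustration
only, not part of the applied diff). It relies on struct kvm_pagewalk
being embedded in struct kvm_mmu as its "w" member, so that e.g.
&vcpu->arch.root_mmu.w is a valid struct kvm_pagewalk pointer:

	/* Before: the pagewalk was reached indirectly, through the mmu. */
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	*gpa = mmu->w.gva_to_gpa(vcpu, &mmu->w, gva, access, exception);

	/* After: vcpu->arch.cpu_walk points at the "w" member directly. */
	struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;

	*gpa = cpu_walk->gva_to_gpa(vcpu, cpu_walk, gva, access, exception);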
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/hyperv.c | 2 +-
arch/x86/kvm/mmu/mmu.c | 4 +--
arch/x86/kvm/mmu/paging_tmpl.h | 4 +--
arch/x86/kvm/svm/nested.c | 4 +--
arch/x86/kvm/vmx/nested.c | 4 +--
arch/x86/kvm/x86.c | 44 +++++++++++++++++----------------
7 files changed, 33 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a1a09b59ac0b..6c5c59b9cfe3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -879,7 +879,7 @@ struct kvm_vcpu_arch {
* Pointer to the mmu context currently used for
* gva_to_gpa translations.
*/
- struct kvm_mmu *walk_mmu;
+ struct kvm_pagewalk *cpu_walk;
u64 pdptrs[4]; /* pae */
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index a6e7d6f85409..36e416eb92d1 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2041,7 +2041,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
* read with kvm_read_guest().
*/
if (!hc->fast) {
- hc->ingpa = kvm_translate_gpa(vcpu, &vcpu->arch.walk_mmu->w, hc->ingpa,
+ hc->ingpa = kvm_translate_gpa(vcpu, vcpu->arch.cpu_walk, hc->ingpa,
PFERR_GUEST_FINAL_MASK, NULL, 0);
if (unlikely(hc->ingpa == INVALID_GPA))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 967c2226cba0..d6a011b2d36e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6641,7 +6641,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
* be synced when switching to that new cr3, so nothing needs to be
* done here for them.
*/
- kvm_mmu_invalidate_addr(vcpu, &vcpu->arch.walk_mmu->w, gva, KVM_MMU_ROOTS_ALL);
+ kvm_mmu_invalidate_addr(vcpu, vcpu->arch.cpu_walk, gva, KVM_MMU_ROOTS_ALL);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invlpg);
@@ -6778,7 +6778,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
vcpu->arch.mmu = &vcpu->arch.root_mmu;
- vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.cpu_walk = &vcpu->arch.root_mmu.w;
ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
if (ret)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 99a0e1c95223..c7690f4929ae 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -541,7 +541,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
}
#endif
walker->fault.address = addr;
- walker->fault.nested_page_fault = w != &vcpu->arch.walk_mmu->w;
+ walker->fault.nested_page_fault = w != vcpu->arch.cpu_walk;
walker->fault.async_page_fault = false;
trace_kvm_mmu_walker_error(walker->fault.error_code);
@@ -894,7 +894,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_pagewalk *w,
#ifndef CONFIG_X86_64
/* A 64-bit GVA should be impossible on 32-bit KVM. */
- WARN_ON_ONCE((addr >> 32) && w == &vcpu->arch.walk_mmu->w);
+ WARN_ON_ONCE((addr >> 32) && w == vcpu->arch.cpu_walk);
#endif
r = FNAME(walk_addr_generic)(&walker, vcpu, w, addr, access);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index f7168fc8046b..4781145faa14 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -102,13 +102,13 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->w.get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu->w.inject_page_fault = nested_svm_inject_npf_exit;
- vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
+ vcpu->arch.cpu_walk = &vcpu->arch.nested_mmu.w;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
vcpu->arch.mmu = &vcpu->arch.root_mmu;
- vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.cpu_walk = &vcpu->arch.root_mmu.w;
}
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index af773b4e008b..ed72625005fc 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -499,13 +499,13 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->w.inject_page_fault = nested_ept_inject_page_fault;
- vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
+ vcpu->arch.cpu_walk = &vcpu->arch.nested_mmu.w;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
vcpu->arch.mmu = &vcpu->arch.root_mmu;
- vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.cpu_walk = &vcpu->arch.root_mmu.w;
}
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c2de39ad7595..03ee584986ac 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -990,11 +990,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
- struct kvm_mmu *fault_mmu;
+ struct kvm_pagewalk *fault_walk;
+
WARN_ON_ONCE(fault->vector != PF_VECTOR);
- fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
- vcpu->arch.walk_mmu;
+ fault_walk = fault->nested_page_fault ? &vcpu->arch.mmu->w :
+ vcpu->arch.cpu_walk;
/*
* Invalidate the TLB entry for the faulting address, if it exists,
@@ -1002,10 +1003,10 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
*/
if ((fault->error_code & PFERR_PRESENT_MASK) &&
!(fault->error_code & PFERR_RSVD_MASK))
- kvm_mmu_invalidate_addr(vcpu, &fault_mmu->w, fault->address,
+ kvm_mmu_invalidate_addr(vcpu, fault_walk, fault->address,
KVM_MMU_ROOT_CURRENT);
- fault_mmu->w.inject_page_fault(vcpu, fault);
+ fault_walk->inject_page_fault(vcpu, fault);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_emulated_page_fault);
@@ -1060,7 +1061,7 @@ static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
*/
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+ struct kvm_pagewalk *w = vcpu->arch.cpu_walk;
gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
gpa_t real_gpa;
int i;
@@ -1071,7 +1072,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
* If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
* to an L1 GPA.
*/
- real_gpa = kvm_translate_gpa(vcpu, &mmu->w, gfn_to_gpa(pdpt_gfn),
+ real_gpa = kvm_translate_gpa(vcpu, w, gfn_to_gpa(pdpt_gfn),
PFERR_USER_MASK | PFERR_WRITE_MASK |
PFERR_GUEST_PAGE_MASK, NULL, 0);
if (real_gpa == INVALID_GPA)
@@ -1095,7 +1096,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
* Shadow page roots need to be reconstructed instead.
*/
if (!tdp_enabled && memcmp(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)))
- kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);
+ kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.root_mmu,
+ KVM_MMU_ROOT_CURRENT);
memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
@@ -7851,7 +7853,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- struct kvm_pagewalk *cpu_walk = &vcpu->arch.walk_mmu->w;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
return cpu_walk->gva_to_gpa(vcpu, cpu_walk, gva, access, exception);
@@ -7861,7 +7863,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_read);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- struct kvm_pagewalk *cpu_walk = &vcpu->arch.walk_mmu->w;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
@@ -7873,7 +7875,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_write);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- struct kvm_pagewalk *cpu_walk = &vcpu->arch.walk_mmu->w;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
return cpu_walk->gva_to_gpa(vcpu, cpu_walk, gva, 0, exception);
}
@@ -7882,7 +7884,7 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
struct kvm_vcpu *vcpu, u64 access,
struct x86_exception *exception)
{
- struct kvm_pagewalk *cpu_walk = &vcpu->arch.walk_mmu->w;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
void *data = val;
int r = X86EMUL_CONTINUE;
@@ -7915,7 +7917,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- struct kvm_pagewalk *cpu_walk = &vcpu->arch.walk_mmu->w;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
unsigned offset;
int ret;
@@ -7974,7 +7976,7 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
struct kvm_vcpu *vcpu, u64 access,
struct x86_exception *exception)
{
- struct kvm_pagewalk *cpu_walk = &vcpu->arch.walk_mmu->w;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
void *data = val;
int r = X86EMUL_CONTINUE;
@@ -8080,7 +8082,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t *gpa, struct x86_exception *exception,
bool write)
{
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
| (write ? PFERR_WRITE_MASK : 0);
@@ -8090,7 +8092,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
* shadow page table for L2 guest.
*/
if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
- !permission_fault(vcpu, &vcpu->arch.walk_mmu->w,
+ !permission_fault(vcpu, cpu_walk,
vcpu->arch.mmio_access, 0, access))) {
*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
(gva & (PAGE_SIZE - 1));
@@ -8098,7 +8100,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
return 1;
}
- *gpa = mmu->w.gva_to_gpa(vcpu, &mmu->w, gva, access, exception);
+ *gpa = cpu_walk->gva_to_gpa(vcpu, cpu_walk, gva, access, exception);
if (*gpa == INVALID_GPA)
return -1;
@@ -14211,15 +14213,15 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spec_ctrl_test_value);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+ struct kvm_pagewalk *cpu_walk = vcpu->arch.cpu_walk;
struct x86_exception fault;
u64 access = error_code &
(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
- mmu->w.gva_to_gpa(vcpu, &mmu->w, gva, access, &fault) != INVALID_GPA) {
+ cpu_walk->gva_to_gpa(vcpu, cpu_walk, gva, access, &fault) != INVALID_GPA) {
/*
- * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
+ * If cpu_walk->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed
* with the error code that the processor gave.
*/
@@ -14230,7 +14232,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
fault.address = gva;
fault.async_page_fault = false;
}
- vcpu->arch.walk_mmu->w.inject_page_fault(vcpu, &fault);
+ cpu_walk->inject_page_fault(vcpu, &fault);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fixup_and_inject_pf_error);
--
2.52.0
Thread overview: 23+ messages
2026-05-11 15:06 [RFC PATCH 00/22] KVM: apply chainsaw to struct kvm_mmu Paolo Bonzini
2026-05-11 15:06 ` [PATCH 01/22] KVM: x86: remove nested_mmu from mmu_is_nested() Paolo Bonzini
2026-05-11 15:06 ` [PATCH 02/22] KVM: x86: move pdptrs out of the MMU Paolo Bonzini
2026-05-11 15:06 ` [PATCH 03/22] KVM: x86: check that kvm_handle_invpcid is only invoked with shadow paging Paolo Bonzini
2026-05-11 15:06 ` [PATCH 04/22] KVM: x86/hyperv: remove unnecessary mmu_is_nested() check Paolo Bonzini
2026-05-11 15:06 ` [PATCH 05/22] KVM: x86/mmu: introduce struct kvm_pagewalk Paolo Bonzini
2026-05-11 15:06 ` [PATCH 06/22] KVM: x86/mmu: move get_guest_pgd to " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 07/22] KVM: x86/mmu: move gva_to_gpa " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 08/22] KVM: x86/mmu: move get_pdptr " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 09/22] KVM: x86/mmu: move inject_page_fault " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 10/22] KVM: x86/mmu: move CPU-related fields " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 11/22] KVM: x86/mmu: change CPU-role accessor fields to take " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 12/22] KVM: x86/mmu: move remaining permission fields to " Paolo Bonzini
2026-05-11 15:06 ` [PATCH 13/22] KVM: x86/mmu: pass struct kvm_pagewalk to kvm_mmu_invalidate_addr Paolo Bonzini
2026-05-11 15:06 ` [PATCH 14/22] KVM: x86/mmu: change walk_mmu to struct kvm_pagewalk Paolo Bonzini [this message]
2026-05-11 15:06 ` [PATCH 15/22] KVM: x86/mmu: change nested_mmu.w to nested_cpu_walk Paolo Bonzini
2026-05-11 15:06 ` [PATCH 16/22] KVM: x86/mmu: make cpu_walk a value Paolo Bonzini
2026-05-11 15:06 ` [PATCH 17/22] KVM: x86/mmu: pull struct kvm_pagewalk out of struct kvm_mmu Paolo Bonzini
2026-05-11 15:06 ` [PATCH 18/22] KVM: x86/mmu: cleanup functions that initialize shadow MMU Paolo Bonzini
2026-05-11 15:06 ` [PATCH 19/22] KVM: x86/mmu: pull page format to a new struct Paolo Bonzini
2026-05-11 15:06 ` [PATCH 20/22] KVM: x86/mmu: merge struct rsvd_bits_validate into struct kvm_page_format Paolo Bonzini
2026-05-11 15:06 ` [PATCH 21/22] KVM: x86/mmu: parameterize update_permission_bitmask() Paolo Bonzini
2026-05-11 15:06 ` [PATCH 22/22] KVM: x86/mmu: use kvm_page_format to test SPTEs Paolo Bonzini