From: Ben Gardon <bgardon@google.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	David Matlack <dmatlack@google.com>,
	Vipin Sharma <vipinsh@google.com>,
	Nagareddy Reddy <nspreddy@google.com>,
	Ben Gardon <bgardon@google.com>
Subject: [RFC 04/14] KVM: x86/MMU: Expose functions for paging_tmpl.h
Date: Wed, 21 Dec 2022 22:24:08 +0000
Message-ID: <20221221222418.3307832-5-bgardon@google.com>
In-Reply-To: <20221221222418.3307832-1-bgardon@google.com>

In preparation for moving paging_tmpl.h to shadow_mmu.c, expose the various
functions it needs through mmu_internal.h. This includes modifying the
BUILD_MMU_ROLE_ACCESSOR macro so that it no longer adds the static keyword
automatically, since some, but not all, of the accessors are needed by
paging_tmpl.h. Accessors that remain internal to mmu.c now prefix the macro
invocation with static themselves.
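
For illustration, the exposed and internal-only forms now expand roughly
as follows (a sketch based on the macro definition in this patch;
formatting is approximate):

	/* Exposed: external linkage, declared in mmu_internal.h. */
	BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
	/* ... expands to ... */
	inline bool __maybe_unused is_cr0_wp(struct kvm_mmu *mmu)
	{
		return !!(mmu->cpu_role.base.cr0_wp);
	}

	/* Internal only: the use site now supplies static itself. */
	static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
	/* ... expands to ... */
	static inline bool __maybe_unused is_cr4_pse(struct kvm_mmu *mmu)
	{
		return !!(mmu->cpu_role.ext.cr4_pse);
	}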

No functional change intended.
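
As background, the fault-path helpers exposed here line up with the
calls made by the guest page fault handler that paging_tmpl.h generates.
A simplified, non-authoritative sketch of FNAME(page_fault) follows; the
walker variable, the mmu_seq snapshot, and the exact ordering are
assumptions about the contemporaneous code, with unrelated logic elided:

	static int FNAME(page_fault)(struct kvm_vcpu *vcpu,
				     struct kvm_page_fault *fault)
	{
		struct guest_walker walker;
		unsigned long mmu_seq;
		int r;

		/* Walk the guest page tables to find the faulting gfn. */
		r = FNAME(walk_addr)(&walker, vcpu, fault->addr, ...);

		if (page_fault_handle_page_track(vcpu, fault))
			return RET_PF_EMULATE;

		r = mmu_topup_memory_caches(vcpu, true);
		if (r)
			return r;

		mmu_seq = vcpu->kvm->mmu_invalidate_seq;
		smp_rmb();

		r = kvm_faultin_pfn(vcpu, fault);
		if (r != RET_PF_CONTINUE)
			return r;

		r = handle_abnormal_pfn(vcpu, fault, walker.pte_access);
		if (r != RET_PF_CONTINUE)
			return r;

		write_lock(&vcpu->kvm->mmu_lock);
		if (is_page_fault_stale(vcpu, fault, mmu_seq))
			goto out_unlock;

		/* ... map the translation into the shadow page tables ... */
	}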

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 32 ++++++++++++++++----------------
 arch/x86/kvm/mmu/mmu_internal.h | 16 ++++++++++++++++
 2 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index bf14e181eb12..a17e8a79e4df 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -153,18 +153,18 @@ BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
  * and the vCPU may be incorrect/irrelevant.
  */
 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
-static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
+inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
 {								\
 	return !!(mmu->cpu_role. base_or_ext . reg##_##name);	\
 }
 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
-BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
+static BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
-BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
-BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
-BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
+static BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
+static BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
+static BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
-BUILD_MMU_ROLE_ACCESSOR(ext,  efer, lma);
+static BUILD_MMU_ROLE_ACCESSOR(ext,  efer, lma);
 
 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
 {
@@ -210,7 +210,7 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-static gfn_t get_mmio_spte_gfn(u64 spte)
+gfn_t get_mmio_spte_gfn(u64 spte)
 {
 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
@@ -240,7 +240,7 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 	return likely(kvm_gen == spte_gen);
 }
 
-static int is_cpuid_PSE36(void)
+int is_cpuid_PSE36(void)
 {
 	return 1;
 }
@@ -279,7 +279,7 @@ void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 	}
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
+int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
 	int r;
 
@@ -818,8 +818,8 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 	return -EFAULT;
 }
 
-static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-			       unsigned int access)
+int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			unsigned int access)
 {
 	/* The pfn is invalid, report the error! */
 	if (unlikely(is_error_pfn(fault->pfn)))
@@ -1275,8 +1275,8 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	return RET_PF_RETRY;
 }
 
-static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
-					 struct kvm_page_fault *fault)
+bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+				  struct kvm_page_fault *fault)
 {
 	if (unlikely(fault->rsvd))
 		return false;
@@ -1338,7 +1338,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
 	bool async;
@@ -1403,8 +1403,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
  * Returns true if the page fault is stale and needs to be retried, i.e. if the
  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
  */
-static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
-				struct kvm_page_fault *fault, int mmu_seq)
+bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			 int mmu_seq)
 {
 	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
 
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 74a99b67f09e..957376fcb333 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -341,6 +341,22 @@ bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu);
 void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu);
 
+int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect);
 bool need_topup_split_caches_or_resched(struct kvm *kvm);
 int topup_split_caches(struct kvm *kvm);
+
+bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			 int mmu_seq);
+bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+				  struct kvm_page_fault *fault);
+int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			unsigned int access);
+
+gfn_t get_mmio_spte_gfn(u64 spte);
+
+bool is_efer_nx(struct kvm_mmu *mmu);
+bool is_cr4_smep(struct kvm_mmu *mmu);
+bool is_cr0_wp(struct kvm_mmu *mmu);
+int is_cpuid_PSE36(void);
 #endif /* __KVM_X86_MMU_INTERNAL_H */
-- 
2.39.0.314.g84b9a713c41-goog


Thread overview: 29+ messages
2022-12-21 22:24 [RFC 00/14] KVM: x86/MMU: Formalize the Shadow MMU Ben Gardon
2022-12-21 22:24 ` [RFC 01/14] KVM: x86/MMU: Add shadow_mmu.(c|h) Ben Gardon
2023-02-01 19:45   ` Sean Christopherson
2023-02-01 19:48     ` Ben Gardon
2022-12-21 22:24 ` [RFC 02/14] KVM: x86/MMU: Expose functions for the Shadow MMU Ben Gardon
2022-12-21 22:24 ` Ben Gardon [this message]
2023-01-06 19:49   ` [RFC 04/14] KVM: x86/MMU: Expose functions for paging_tmpl.h David Matlack
2023-01-09 18:47     ` Ben Gardon
2022-12-21 22:24 ` [RFC 05/14] KVM: x86/MMU: Move paging_tmpl.h includes to shadow_mmu.c Ben Gardon
2022-12-21 22:24 ` [RFC 06/14] KVM: x86/MMU: Clean up Shadow MMU exports Ben Gardon
2022-12-21 22:24 ` [RFC 07/14] KVM: x86/MMU: Cleanup shrinker interface with Shadow MMU Ben Gardon
2023-01-13 17:43   ` Vipin Sharma
2023-01-13 17:51   ` Vipin Sharma
2022-12-21 22:24 ` [RFC 08/14] KVM: x86/MMU: Clean up naming of exported Shadow MMU functions Ben Gardon
2022-12-21 22:24 ` [RFC 09/14] KVM: x86/MMU: Only make pages available on Shadow MMU fault Ben Gardon
2022-12-29 18:30   ` David Matlack
2022-12-21 22:24 ` [RFC 10/14] KVM: x86/MMU: Fix naming on prepare / commit zap page functions Ben Gardon
2022-12-21 22:24 ` [RFC 11/14] KVM: x86/MMU: Factor Shadow MMU wrprot / clear dirty ops out of mmu.c Ben Gardon
2022-12-21 22:24 ` [RFC 12/14] KVM: x86/MMU: Remove unneeded exports from shadow_mmu.c Ben Gardon
2022-12-21 22:24 ` [RFC 13/14] KVM: x86/MMU: Wrap uses of kvm_handle_gfn_range in mmu.c Ben Gardon
2023-02-01 21:25   ` Sean Christopherson
2023-02-01 22:30     ` Ben Gardon
2022-12-21 22:24 ` [RFC 14/14] KVM: x86/MMU: Add kvm_shadow_mmu_ to the last few functions in shadow_mmu.h Ben Gardon
     [not found] ` <20221221222418.3307832-4-bgardon@google.com>
2022-12-21 22:40   ` [RFC 03/14] KVM: x86/MMU: Move the Shadow MMU implementation to shadow_mmu.c Ben Gardon
2023-01-13 18:06     ` Vipin Sharma
2023-01-06 19:18 ` [RFC 00/14] KVM: x86/MMU: Formalize the Shadow MMU David Matlack
2023-01-09 18:43   ` Ben Gardon
2023-02-01 20:02 ` Sean Christopherson
2023-02-01 20:45   ` Ben Gardon
