From: Ben Gardon <bgardon@google.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
Sean Christopherson <seanjc@google.com>,
David Matlack <dmatlack@google.com>,
Vipin Sharma <vipinsh@google.com>,
Nagareddy Reddy <nspreddy@google.com>,
Ben Gardon <bgardon@google.com>
Subject: [RFC 02/14] KVM: x86/MMU: Expose functions for the Shadow MMU
Date: Wed, 21 Dec 2022 22:24:06 +0000
Message-ID: <20221221222418.3307832-3-bgardon@google.com>
In-Reply-To: <20221221222418.3307832-1-bgardon@google.com>
Expose, via mmu_internal.h, various common MMU functions which the
Shadow MMU will need. This slightly reduces the work needed to move the
Shadow MMU code out of mmu.c, which will already be a massive change.
No functional change intended.
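
For most of these functions the change follows the same pattern: drop
the "static" qualifier from the definition in mmu.c and add a matching
declaration to mmu_internal.h. For example, for is_obsolete_sp()
(excerpted from the diff below; kvm_available_flush_tlb_with_range() is
instead moved into the header as a static inline):

	/* mmu.c: definition loses its static qualifier */
	-static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
	+bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)

	/* mmu_internal.h: new declaration for use by the Shadow MMU code */
	+bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
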
Signed-off-by: Ben Gardon <bgardon@google.com>
---
arch/x86/kvm/mmu/mmu.c | 41 ++++++++++++++-------------------
arch/x86/kvm/mmu/mmu_internal.h | 24 +++++++++++++++++++
2 files changed, 41 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 07b99a7ce830..729a2799d4d7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -156,9 +156,9 @@ struct kvm_shadow_walk_iterator {
({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
__shadow_walk_next(&(_walker), spte))
-static struct kmem_cache *pte_list_desc_cache;
+struct kmem_cache *pte_list_desc_cache;
struct kmem_cache *mmu_page_header_cache;
-static struct percpu_counter kvm_total_used_mmu_pages;
+struct percpu_counter kvm_total_used_mmu_pages;
static void mmu_spte_set(u64 *sptep, u64 spte);
@@ -234,11 +234,6 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
return regs;
}
-static inline bool kvm_available_flush_tlb_with_range(void)
-{
- return kvm_x86_ops.tlb_remote_flush_with_range;
-}
-
static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
struct kvm_tlb_range *range)
{
@@ -262,8 +257,8 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
kvm_flush_remote_tlbs_with_range(kvm, &range);
}
-static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
- unsigned int access)
+void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+ unsigned int access)
{
u64 spte = make_mmio_spte(vcpu, gfn, access);
@@ -610,7 +605,7 @@ static bool mmu_spte_age(u64 *sptep)
return true;
}
-static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
+void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
if (is_tdp_mmu(vcpu->arch.mmu)) {
kvm_tdp_mmu_walk_lockless_begin();
@@ -629,7 +624,7 @@ static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
}
}
-static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
+void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
if (is_tdp_mmu(vcpu->arch.mmu)) {
kvm_tdp_mmu_walk_lockless_end();
@@ -822,8 +817,8 @@ void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
&kvm->arch.possible_nx_huge_pages);
}
-static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
- bool nx_huge_page_possible)
+void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ bool nx_huge_page_possible)
{
sp->nx_huge_page_disallowed = true;
@@ -857,16 +852,15 @@ void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
list_del_init(&sp->possible_nx_huge_page_link);
}
-static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
sp->nx_huge_page_disallowed = false;
untrack_possible_nx_huge_page(kvm, sp);
}
-static struct kvm_memory_slot *
-gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool no_dirty_log)
+struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
+ gfn_t gfn, bool no_dirty_log)
{
struct kvm_memory_slot *slot;
@@ -1403,7 +1397,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
return write_protected;
}
-static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
+bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
{
struct kvm_memory_slot *slot;
@@ -1902,9 +1896,8 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return ret;
}
-static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
- struct list_head *invalid_list,
- bool remote_flush)
+bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, struct list_head *invalid_list,
+ bool remote_flush)
{
if (!remote_flush && list_empty(invalid_list))
return false;
@@ -1916,7 +1909,7 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
return true;
}
-static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
if (sp->role.invalid)
return true;
@@ -6148,7 +6141,7 @@ static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
}
-static bool need_topup_split_caches_or_resched(struct kvm *kvm)
+bool need_topup_split_caches_or_resched(struct kvm *kvm)
{
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
return true;
@@ -6163,7 +6156,7 @@ static bool need_topup_split_caches_or_resched(struct kvm *kvm)
need_topup(&kvm->arch.split_shadow_page_cache, 1);
}
-static int topup_split_caches(struct kvm *kvm)
+int topup_split_caches(struct kvm *kvm)
{
/*
* Allocating rmap list entries when splitting huge pages for nested
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index dbaf6755c5a7..856e2e0a8420 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -131,7 +131,9 @@ struct kvm_mmu_page {
#endif
};
+extern struct kmem_cache *pte_list_desc_cache;
extern struct kmem_cache *mmu_page_header_cache;
+extern struct percpu_counter kvm_total_used_mmu_pages;
static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
@@ -317,6 +319,28 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_
void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ bool nx_huge_page_possible);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static inline bool kvm_available_flush_tlb_with_range(void)
+{
+ return kvm_x86_ops.tlb_remote_flush_with_range;
+}
+
+void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+ unsigned int access);
+struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
+ gfn_t gfn, bool no_dirty_log);
+bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn);
+bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, struct list_head *invalid_list,
+ bool remote_flush);
+bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
+
+void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu);
+void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu);
+
+bool need_topup_split_caches_or_resched(struct kvm *kvm);
+int topup_split_caches(struct kvm *kvm);
#endif /* __KVM_X86_MMU_INTERNAL_H */
--
2.39.0.314.g84b9a713c41-goog