From: David Matlack <dmatlack@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Marc Zyngier <maz@kernel.org>,
Huacai Chen <chenhuacai@kernel.org>,
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
Anup Patel <anup@brainfault.org>,
Paul Walmsley <paul.walmsley@sifive.com>,
Palmer Dabbelt <palmer@dabbelt.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Sean Christopherson <seanjc@google.com>,
Andrew Jones <drjones@redhat.com>,
Ben Gardon <bgardon@google.com>, Peter Xu <peterx@redhat.com>,
maciej.szmigiero@oracle.com,
"moderated list:KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)"
<kvmarm@lists.cs.columbia.edu>,
"open list:KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)"
<linux-mips@vger.kernel.org>,
"open list:KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)"
<kvm@vger.kernel.org>,
"open list:KERNEL VIRTUAL MACHINE FOR RISC-V (KVM/riscv)"
<kvm-riscv@lists.infradead.org>,
Peter Feiner <pfeiner@google.com>,
David Matlack <dmatlack@google.com>
Subject: [PATCH v2 13/26] KVM: x86/mmu: Pass const memslot to init_shadow_page() and descendants
Date: Fri, 11 Mar 2022 00:25:15 +0000
Message-ID: <20220311002528.2230172-14-dmatlack@google.com>
In-Reply-To: <20220311002528.2230172-1-dmatlack@google.com>
Use a const pointer for the memslot argument so that init_shadow_page()
and its descendants can be called from contexts that only hold a const
pointer to the memslot.
No functional change intended.
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
---
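A quick illustration of the motivation, as a hypothetical sketch rather
than code from this series: in C, a caller that only holds a
const-qualified pointer cannot pass it to a function taking a non-const
parameter without the compiler diagnosing a discarded qualifier, so
const-ness has to propagate down the entire call chain. The struct and
function names below are made up for illustration only:

	/* Hypothetical sketch (not kernel code): const must propagate
	 * downward through every callee on the path.
	 */
	struct memslot { unsigned long npages; };

	static void init_nonconst(struct memslot *slot) { (void)slot; }
	static void init_const(const struct memslot *slot) { (void)slot; }

	static void caller(const struct memslot *slot)
	{
		/* init_nonconst(slot);  <-- diagnosed: passing a const
		 * pointer discards the 'const' qualifier.
		 */
		init_const(slot);  /* OK once the parameter is const */
	}

This is why the change has to touch every function on the path, e.g.
kvm_slot_page_track_add_page() down through update_gfn_track(), and not
just init_shadow_page() itself.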
arch/x86/include/asm/kvm_page_track.h | 2 +-
arch/x86/kvm/mmu/mmu.c | 6 +++---
arch/x86/kvm/mmu/mmu_internal.h | 2 +-
arch/x86/kvm/mmu/page_track.c | 4 ++--
arch/x86/kvm/mmu/tdp_mmu.c | 2 +-
arch/x86/kvm/mmu/tdp_mmu.h | 2 +-
6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index eb186bc57f6a..3a2dc183ae9a 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -58,7 +58,7 @@ int kvm_page_track_create_memslot(struct kvm *kvm,
unsigned long npages);
void kvm_slot_page_track_add_page(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
+ const struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode);
void kvm_slot_page_track_remove_page(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 23c0a36ac93f..d7ad71be6c52 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -794,7 +794,7 @@ void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
}
static void account_shadowed(struct kvm *kvm,
- struct kvm_memory_slot *slot,
+ const struct kvm_memory_slot *slot,
struct kvm_mmu_page *sp)
{
gfn_t gfn;
@@ -1373,7 +1373,7 @@ int kvm_cpu_dirty_log_size(void)
}
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn,
+ const struct kvm_memory_slot *slot, u64 gfn,
int min_level)
{
struct kvm_rmap_head *rmap_head;
@@ -2151,7 +2151,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
}
static void init_shadow_page(struct kvm *kvm, struct kvm_mmu_page *sp,
- struct kvm_memory_slot *slot, gfn_t gfn,
+ const struct kvm_memory_slot *slot, gfn_t gfn,
union kvm_mmu_page_role role)
{
struct hlist_head *sp_list;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d4e2de5f2a6d..b6e22ba9c654 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -134,7 +134,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn,
+ const struct kvm_memory_slot *slot, u64 gfn,
int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
u64 start_gfn, u64 pages);
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 2e09d1b6249f..3e7901294573 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -84,7 +84,7 @@ int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
return 0;
}
-static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
+static void update_gfn_track(const struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode, short count)
{
int index, val;
@@ -112,7 +112,7 @@ static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
* @mode: tracking mode, currently only write track is supported.
*/
void kvm_slot_page_track_add_page(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
+ const struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode)
{
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index f285fd76717b..85b7bc333302 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1768,7 +1768,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
* Returns true if an SPTE was set and a TLB flush is needed.
*/
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
+ const struct kvm_memory_slot *slot, gfn_t gfn,
int min_level)
{
struct kvm_mmu_page *root;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 54bc8118c40a..8308bfa4126b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -42,7 +42,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
+ const struct kvm_memory_slot *slot, gfn_t gfn,
int min_level);
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
--
2.35.1.723.g4982287a31-goog
Thread overview: 67+ messages
2022-03-11 0:25 [PATCH v2 00/26] Extend Eager Page Splitting to the shadow MMU David Matlack
2022-03-11 0:25 ` [PATCH v2 01/26] KVM: x86/mmu: Optimize MMU page cache lookup for all direct SPs David Matlack
2022-03-15 7:40 ` Peter Xu
2022-03-22 18:16 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 02/26] KVM: x86/mmu: Use a bool for direct David Matlack
2022-03-15 7:46 ` Peter Xu
2022-03-22 18:21 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 03/26] KVM: x86/mmu: Derive shadow MMU page role from parent David Matlack
2022-03-15 8:15 ` Peter Xu
2022-03-22 18:30 ` David Matlack
2022-03-30 14:25 ` Peter Xu
2022-03-11 0:25 ` [PATCH v2 04/26] KVM: x86/mmu: Decompose kvm_mmu_get_page() into separate functions David Matlack
2022-03-15 8:50 ` Peter Xu
2022-03-22 22:09 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 05/26] KVM: x86/mmu: Rename shadow MMU functions that deal with shadow pages David Matlack
2022-03-15 8:52 ` Peter Xu
2022-03-22 21:35 ` David Matlack
2022-03-30 14:28 ` Peter Xu
2022-03-11 0:25 ` [PATCH v2 06/26] KVM: x86/mmu: Pass memslot to kvm_mmu_new_shadow_page() David Matlack
2022-03-15 9:03 ` Peter Xu
2022-03-22 22:05 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 07/26] KVM: x86/mmu: Separate shadow MMU sp allocation from initialization David Matlack
2022-03-15 9:54 ` Peter Xu
2022-03-11 0:25 ` [PATCH v2 08/26] KVM: x86/mmu: Link spt to sp during allocation David Matlack
2022-03-15 10:04 ` Peter Xu
2022-03-22 22:30 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 09/26] KVM: x86/mmu: Move huge page split sp allocation code to mmu.c David Matlack
2022-03-15 10:17 ` Peter Xu
2022-03-11 0:25 ` [PATCH v2 10/26] KVM: x86/mmu: Use common code to free kvm_mmu_page structs David Matlack
2022-03-15 10:22 ` Peter Xu
2022-03-22 22:33 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 11/26] KVM: x86/mmu: Use common code to allocate kvm_mmu_page structs from vCPU caches David Matlack
2022-03-15 10:27 ` Peter Xu
2022-03-22 22:35 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 12/26] KVM: x86/mmu: Pass const memslot to rmap_add() David Matlack
2022-03-11 0:25 ` David Matlack [this message]
2022-03-11 0:25 ` [PATCH v2 14/26] KVM: x86/mmu: Decouple rmap_add() and link_shadow_page() from kvm_vcpu David Matlack
2022-03-15 10:37 ` Peter Xu
2022-03-11 0:25 ` [PATCH v2 15/26] KVM: x86/mmu: Update page stats in __rmap_add() David Matlack
2022-03-15 10:39 ` Peter Xu
2022-03-11 0:25 ` [PATCH v2 16/26] KVM: x86/mmu: Cache the access bits of shadowed translations David Matlack
2022-03-16 8:32 ` Peter Xu
2022-03-22 22:51 ` David Matlack
2022-03-30 18:30 ` Peter Xu
2022-03-31 21:40 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 17/26] KVM: x86/mmu: Pass access information to make_huge_page_split_spte() David Matlack
2022-03-16 8:44 ` Peter Xu
2022-03-22 23:08 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 18/26] KVM: x86/mmu: Zap collapsible SPTEs at all levels in the shadow MMU David Matlack
2022-03-16 8:49 ` Peter Xu
2022-03-22 23:11 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 19/26] KVM: x86/mmu: Refactor drop_large_spte() David Matlack
2022-03-16 8:53 ` Peter Xu
2022-03-11 0:25 ` [PATCH v2 20/26] KVM: x86/mmu: Extend Eager Page Splitting to the shadow MMU David Matlack
2022-03-16 10:26 ` Peter Xu
2022-03-22 0:07 ` David Matlack
2022-03-22 23:58 ` David Matlack
2022-03-30 18:34 ` Peter Xu
2022-03-31 19:57 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 21/26] KVM: Allow for different capacities in kvm_mmu_memory_cache structs David Matlack
2022-03-19 5:27 ` Anup Patel
2022-03-22 23:13 ` David Matlack
2022-03-11 0:25 ` [PATCH v2 22/26] KVM: Allow GFP flags to be passed when topping up MMU caches David Matlack
2022-03-11 0:25 ` [PATCH v2 23/26] KVM: x86/mmu: Fully split huge pages that require extra pte_list_desc structs David Matlack
2022-03-11 0:25 ` [PATCH v2 24/26] KVM: x86/mmu: Split huge pages aliased by multiple SPTEs David Matlack
2022-03-11 0:25 ` [PATCH v2 25/26] KVM: x86/mmu: Drop NULL pte_list_desc_cache fallback David Matlack
2022-03-11 0:25 ` [PATCH v2 26/26] KVM: selftests: Map x86_64 guest virtual memory with huge pages David Matlack