From: fangyu.yu@linux.alibaba.com
To: pbonzini@redhat.com, corbet@lwn.net, anup@brainfault.org,
atish.patra@linux.dev, pjw@kernel.org, palmer@dabbelt.com,
aou@eecs.berkeley.edu, alex@ghiti.fr, skhan@linuxfoundation.org
Cc: guoren@kernel.org, radim.krcmar@oss.qualcomm.com,
andrew.jones@oss.qualcomm.com, linux-doc@vger.kernel.org,
kvm@vger.kernel.org, kvm-riscv@lists.infradead.org,
linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
Fangyu Yu <fangyu.yu@linux.alibaba.com>
Subject: [PATCH v6 2/4] RISC-V: KVM: Cache gstage pgd_levels in struct kvm_gstage
Date: Mon, 30 Mar 2026 20:25:59 +0800
Message-ID: <20260330122601.22140-3-fangyu.yu@linux.alibaba.com>
In-Reply-To: <20260330122601.22140-1-fangyu.yu@linux.alibaba.com>
From: Fangyu Yu <fangyu.yu@linux.alibaba.com>

G-stage page-table helpers frequently chase gstage->kvm->arch to
fetch pgd_levels. This adds noise and repeats the same dereference
chain in hot paths.

Add pgd_levels to struct kvm_gstage and initialize it from kvm->arch
when setting up a gstage instance. Introduce kvm_riscv_gstage_init()
to centralize initialization and switch gstage code to use
gstage->pgd_levels.
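
Each converted call site in mmu.c collapses from four open-coded
assignments to a single helper call; roughly, as a before/after
sketch mirroring the hunks below:

	struct kvm_gstage gstage;

	/* Before: the same four assignments repeated at every call site. */
	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	/* After: one call that also caches pgd_levels for later walks. */
	kvm_riscv_gstage_init(&gstage, kvm);
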
Suggested-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
---
arch/riscv/include/asm/kvm_gstage.h | 10 ++++++
arch/riscv/kvm/gstage.c | 10 +++---
arch/riscv/kvm/mmu.c | 50 ++++++-----------------------
3 files changed, 25 insertions(+), 45 deletions(-)

diff --git a/arch/riscv/include/asm/kvm_gstage.h b/arch/riscv/include/asm/kvm_gstage.h
index 5aa58d1f692a..70d9d483365e 100644
--- a/arch/riscv/include/asm/kvm_gstage.h
+++ b/arch/riscv/include/asm/kvm_gstage.h
@@ -15,6 +15,7 @@ struct kvm_gstage {
#define KVM_GSTAGE_FLAGS_LOCAL BIT(0)
unsigned long vmid;
pgd_t *pgd;
+ unsigned long pgd_levels;
};
struct kvm_gstage_mapping {
@@ -92,4 +93,13 @@ static inline unsigned long kvm_riscv_gstage_mode(unsigned long pgd_levels)
}
}
+static inline void kvm_riscv_gstage_init(struct kvm_gstage *gstage, struct kvm *kvm)
+{
+ gstage->kvm = kvm;
+ gstage->flags = 0;
+ gstage->vmid = READ_ONCE(kvm->arch.vmid.vmid);
+ gstage->pgd = kvm->arch.pgd;
+ gstage->pgd_levels = kvm->arch.pgd_levels;
+}
+
#endif
diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
index 4beb9322fe76..7c4c34bc191b 100644
--- a/arch/riscv/kvm/gstage.c
+++ b/arch/riscv/kvm/gstage.c
@@ -26,7 +26,7 @@ static inline unsigned long gstage_pte_index(struct kvm_gstage *gstage,
unsigned long mask;
unsigned long shift = HGATP_PAGE_SHIFT + (kvm_riscv_gstage_index_bits * level);
- if (level == gstage->kvm->arch.pgd_levels - 1)
+ if (level == gstage->pgd_levels - 1)
mask = (PTRS_PER_PTE * (1UL << kvm_riscv_gstage_pgd_xbits)) - 1;
else
mask = PTRS_PER_PTE - 1;
@@ -45,7 +45,7 @@ static int gstage_page_size_to_level(struct kvm_gstage *gstage, unsigned long pa
u32 i;
unsigned long psz = 1UL << 12;
- for (i = 0; i < gstage->kvm->arch.pgd_levels; i++) {
+ for (i = 0; i < gstage->pgd_levels; i++) {
if (page_size == (psz << (i * kvm_riscv_gstage_index_bits))) {
*out_level = i;
return 0;
@@ -58,7 +58,7 @@ static int gstage_page_size_to_level(struct kvm_gstage *gstage, unsigned long pa
static int gstage_level_to_page_order(struct kvm_gstage *gstage, u32 level,
unsigned long *out_pgorder)
{
- if (gstage->kvm->arch.pgd_levels < level)
+ if (gstage->pgd_levels < level)
return -EINVAL;
*out_pgorder = 12 + (level * kvm_riscv_gstage_index_bits);
@@ -83,7 +83,7 @@ bool kvm_riscv_gstage_get_leaf(struct kvm_gstage *gstage, gpa_t addr,
pte_t **ptepp, u32 *ptep_level)
{
pte_t *ptep;
- u32 current_level = gstage->kvm->arch.pgd_levels - 1;
+ u32 current_level = gstage->pgd_levels - 1;
*ptep_level = current_level;
ptep = (pte_t *)gstage->pgd;
@@ -127,7 +127,7 @@ int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
struct kvm_mmu_memory_cache *pcache,
const struct kvm_gstage_mapping *map)
{
- u32 current_level = gstage->kvm->arch.pgd_levels - 1;
+ u32 current_level = gstage->pgd_levels - 1;
pte_t *next_ptep = (pte_t *)gstage->pgd;
pte_t *ptep = &next_ptep[gstage_pte_index(gstage, map->addr, current_level)];
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index fbcdd75cb9af..2d3def024270 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -24,10 +24,7 @@ static void mmu_wp_memory_region(struct kvm *kvm, int slot)
phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
struct kvm_gstage gstage;
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
spin_lock(&kvm->mmu_lock);
kvm_riscv_gstage_wp_range(&gstage, start, end);
@@ -49,10 +46,7 @@ int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
struct kvm_gstage_mapping map;
struct kvm_gstage gstage;
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
@@ -89,10 +83,7 @@ void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
struct kvm_gstage gstage;
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
spin_lock(&kvm->mmu_lock);
kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
@@ -109,10 +100,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
struct kvm_gstage gstage;
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
kvm_riscv_gstage_wp_range(&gstage, start, end);
}
@@ -141,10 +129,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
phys_addr_t size = slot->npages << PAGE_SHIFT;
struct kvm_gstage gstage;
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
spin_lock(&kvm->mmu_lock);
kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
@@ -250,10 +235,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
if (!kvm->arch.pgd)
return false;
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
mmu_locked = spin_trylock(&kvm->mmu_lock);
kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
@@ -275,10 +257,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
&ptep, &ptep_level))
return false;
@@ -298,10 +277,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
&ptep, &ptep_level))
return false;
@@ -463,10 +439,7 @@ int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
struct kvm_gstage gstage;
struct page *page;
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
/* Setup initial state of output mapping */
memset(out_map, 0, sizeof(*out_map));
@@ -587,10 +560,7 @@ void kvm_riscv_mmu_free_pgd(struct kvm *kvm)
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
- gstage.kvm = kvm;
- gstage.flags = 0;
- gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
- gstage.pgd = kvm->arch.pgd;
+ kvm_riscv_gstage_init(&gstage, kvm);
kvm_riscv_gstage_unmap_range(&gstage, 0UL,
kvm_riscv_gstage_gpa_size(kvm->arch.pgd_levels), false);
pgd = READ_ONCE(kvm->arch.pgd);
--
2.50.1