From: Anup Patel <apatel@ventanamicro.com>
To: Atish Patra <atish.patra@linux.dev>
Cc: Palmer Dabbelt <palmer@dabbelt.com>,
Paul Walmsley <paul.walmsley@sifive.com>,
Alexandre Ghiti <alex@ghiti.fr>,
Andrew Jones <ajones@ventanamicro.com>,
Anup Patel <anup@brainfault.org>,
kvm@vger.kernel.org, kvm-riscv@lists.infradead.org,
linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
Anup Patel <apatel@ventanamicro.com>
Subject: [PATCH 13/13] RISC-V: KVM: Pass VMID as parameter to kvm_riscv_hfence_xyz() APIs
Date: Thu, 5 Jun 2025 11:44:58 +0530
Message-ID: <20250605061458.196003-14-apatel@ventanamicro.com>
In-Reply-To: <20250605061458.196003-1-apatel@ventanamicro.com>
Currently, all kvm_riscv_hfence_xyz() APIs assume the VMID to be the
host VMID of the Guest/VM, which restricts these APIs to host TLB
maintenance only. Let's allow passing the VMID as a parameter to all
kvm_riscv_hfence_xyz() APIs so that they can be re-used for nested
virtualization related TLB maintenance.
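As a minimal before/after sketch of the caller-side change (based on
the SBI handler hunks below; for nested virtualization a guest VMID
could be supplied instead of the host VMID):

  /* Before: the helper read the host VMID internally. */
  kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);

  /*
   * After: the caller reads the VMID once and passes it in, so the
   * same helper also works with a VMID other than the host VMID.
   */
  unsigned long vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
  kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);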
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
---
arch/riscv/include/asm/kvm_tlb.h | 17 ++++++---
arch/riscv/kvm/gstage.c | 3 +-
arch/riscv/kvm/tlb.c | 61 ++++++++++++++++++++-----------
arch/riscv/kvm/vcpu_sbi_replace.c | 17 +++++----
arch/riscv/kvm/vcpu_sbi_v01.c | 25 ++++++-------
5 files changed, 73 insertions(+), 50 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_tlb.h b/arch/riscv/include/asm/kvm_tlb.h
index f67e03edeaec..38a2f933ad3a 100644
--- a/arch/riscv/include/asm/kvm_tlb.h
+++ b/arch/riscv/include/asm/kvm_tlb.h
@@ -11,9 +11,11 @@
enum kvm_riscv_hfence_type {
KVM_RISCV_HFENCE_UNKNOWN = 0,
KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+ KVM_RISCV_HFENCE_GVMA_VMID_ALL,
KVM_RISCV_HFENCE_VVMA_ASID_GVA,
KVM_RISCV_HFENCE_VVMA_ASID_ALL,
KVM_RISCV_HFENCE_VVMA_GVA,
+ KVM_RISCV_HFENCE_VVMA_ALL
};
struct kvm_riscv_hfence {
@@ -59,21 +61,24 @@ void kvm_riscv_fence_i(struct kvm *kvm,
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
gpa_t gpa, gpa_t gpsz,
- unsigned long order);
+ unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask);
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
- unsigned long order, unsigned long asid);
+ unsigned long order, unsigned long asid,
+ unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
- unsigned long asid);
+ unsigned long asid, unsigned long vmid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
- unsigned long order);
+ unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask);
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid);
#endif
diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
index c7d61f14f6be..c5dc47b156c4 100644
--- a/arch/riscv/kvm/gstage.c
+++ b/arch/riscv/kvm/gstage.c
@@ -117,7 +117,8 @@ static void gstage_tlb_flush(struct kvm_gstage *gstage, u32 level, gpa_t addr)
if (gstage->flags & KVM_GSTAGE_FLAGS_LOCAL)
kvm_riscv_local_hfence_gvma_vmid_gpa(gstage->vmid, addr, BIT(order), order);
else
- kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order);
+ kvm_riscv_hfence_gvma_vmid_gpa(gstage->kvm, -1UL, 0, addr, BIT(order), order,
+ gstage->vmid);
}
int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 349fcfc93f54..3c5a70a2b927 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -251,6 +251,12 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
d.size, d.order);
break;
+ case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
+ if (kvm_riscv_nacl_available())
+ nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
+ else
+ kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
+ break;
case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
if (kvm_riscv_nacl_available())
@@ -276,6 +282,13 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
d.size, d.order);
break;
+ case KVM_RISCV_HFENCE_VVMA_ALL:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
+ if (kvm_riscv_nacl_available())
+ nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
+ else
+ kvm_riscv_local_hfence_vvma_all(d.vmid);
+ break;
default:
break;
}
@@ -328,14 +341,13 @@ void kvm_riscv_fence_i(struct kvm *kvm,
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
gpa_t gpa, gpa_t gpsz,
- unsigned long order)
+ unsigned long order, unsigned long vmid)
{
- struct kvm_vmid *v = &kvm->arch.vmid;
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
data.asid = 0;
- data.vmid = READ_ONCE(v->vmid);
+ data.vmid = vmid;
data.addr = gpa;
data.size = gpsz;
data.order = order;
@@ -344,23 +356,28 @@ void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
}
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask)
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid)
{
- make_xfence_request(kvm, hbase, hmask, KVM_REQ_TLB_FLUSH,
- KVM_REQ_TLB_FLUSH, NULL);
+ struct kvm_riscv_hfence data = {0};
+
+ data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
+ data.vmid = vmid;
+ make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+ KVM_REQ_TLB_FLUSH, &data);
}
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
- unsigned long order, unsigned long asid)
+ unsigned long order, unsigned long asid,
+ unsigned long vmid)
{
- struct kvm_vmid *v = &kvm->arch.vmid;
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
data.asid = asid;
- data.vmid = READ_ONCE(v->vmid);
+ data.vmid = vmid;
data.addr = gva;
data.size = gvsz;
data.order = order;
@@ -370,15 +387,13 @@ void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
- unsigned long asid)
+ unsigned long asid, unsigned long vmid)
{
- struct kvm_vmid *v = &kvm->arch.vmid;
- struct kvm_riscv_hfence data;
+ struct kvm_riscv_hfence data = {0};
data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
data.asid = asid;
- data.vmid = READ_ONCE(v->vmid);
- data.addr = data.size = data.order = 0;
+ data.vmid = vmid;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
KVM_REQ_HFENCE_VVMA_ALL, &data);
}
@@ -386,14 +401,13 @@ void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
- unsigned long order)
+ unsigned long order, unsigned long vmid)
{
- struct kvm_vmid *v = &kvm->arch.vmid;
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_VVMA_GVA;
data.asid = 0;
- data.vmid = READ_ONCE(v->vmid);
+ data.vmid = vmid;
data.addr = gva;
data.size = gvsz;
data.order = order;
@@ -402,16 +416,21 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
}
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
- unsigned long hbase, unsigned long hmask)
+ unsigned long hbase, unsigned long hmask,
+ unsigned long vmid)
{
- make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
- KVM_REQ_HFENCE_VVMA_ALL, NULL);
+ struct kvm_riscv_hfence data = {0};
+
+ data.type = KVM_RISCV_HFENCE_VVMA_ALL;
+ data.vmid = vmid;
+ make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+ KVM_REQ_HFENCE_VVMA_ALL, &data);
}
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
- PAGE_SHIFT);
+ PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
return 0;
}
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index b17fad091bab..b490ed1428a6 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -96,6 +96,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
unsigned long funcid = cp->a6;
+ unsigned long vmid;
switch (funcid) {
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
@@ -103,22 +104,22 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
- kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
+ kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
else
kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
- cp->a2, cp->a3, PAGE_SHIFT);
+ cp->a2, cp->a3, PAGE_SHIFT, vmid);
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
- kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
- hbase, hmask, cp->a4);
+ kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
+ cp->a4, vmid);
else
- kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
- hbase, hmask,
- cp->a2, cp->a3,
- PAGE_SHIFT, cp->a4);
+ kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
+ cp->a3, PAGE_SHIFT, cp->a4, vmid);
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c
index 8f4c4fa16227..368dfddd23d9 100644
--- a/arch/riscv/kvm/vcpu_sbi_v01.c
+++ b/arch/riscv/kvm/vcpu_sbi_v01.c
@@ -23,6 +23,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm *kvm = vcpu->kvm;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_cpu_trap *utrap = retdata->utrap;
+ unsigned long vmid;
switch (cp->a7) {
case SBI_EXT_0_1_CONSOLE_GETCHAR:
@@ -78,25 +79,21 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
kvm_riscv_fence_i(vcpu->kvm, 0, hmask);
else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if (cp->a1 == 0 && cp->a2 == 0)
- kvm_riscv_hfence_vvma_all(vcpu->kvm,
- 0, hmask);
+ kvm_riscv_hfence_vvma_all(vcpu->kvm, 0, hmask, vmid);
else
- kvm_riscv_hfence_vvma_gva(vcpu->kvm,
- 0, hmask,
- cp->a1, cp->a2,
- PAGE_SHIFT);
+ kvm_riscv_hfence_vvma_gva(vcpu->kvm, 0, hmask, cp->a1,
+ cp->a2, PAGE_SHIFT, vmid);
} else {
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
if (cp->a1 == 0 && cp->a2 == 0)
- kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
- 0, hmask,
- cp->a3);
+ kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, 0, hmask,
+ cp->a3, vmid);
else
- kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
- 0, hmask,
- cp->a1, cp->a2,
- PAGE_SHIFT,
- cp->a3);
+ kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, 0, hmask,
+ cp->a1, cp->a2, PAGE_SHIFT,
+ cp->a3, vmid);
}
break;
default:
--
2.43.0