* [PULL 01/14] target/loongarch: Use auto method with PTW feature
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
@ 2025-10-23 12:06 ` Bibo Mao
2025-10-23 12:06 ` [PULL 02/14] target/loongarch: Add CSR_PWCH write helper function Bibo Mao
` (13 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:06 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
PTW is short for page table walker; it is a hardware page table walk
function. With PTW supported, the hardware MMU parses the page table
and updates TLB entries automatically.
This patch adds an OnOffAuto type for the PTW feature setting.
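A minimal usage sketch (assuming the "max" CPU model defined in this
file; the property name comes from the hunk below):
    qemu-system-loongarch64 -cpu max,ptw=on ...
    qemu-system-loongarch64 -cpu max,ptw=off ...
When left unset, the "max" model keeps ON_OFF_AUTO_AUTO under TCG, so
the feature follows what the accelerator supports.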
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu.c | 26 ++++++++++++++++++++++++++
target/loongarch/cpu.h | 2 ++
2 files changed, 28 insertions(+)
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index 86490e0f72..e80a92fb2e 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -224,6 +224,25 @@ static void loongarch_set_msgint(Object *obj, bool value, Error **errp)
cpu->env.cpucfg[1] = FIELD_DP32(cpu->env.cpucfg[1], CPUCFG1, MSG_INT, value);
}
+static bool loongarch_get_ptw(Object *obj, Error **errp)
+{
+ return LOONGARCH_CPU(obj)->ptw != ON_OFF_AUTO_OFF;
+}
+
+static void loongarch_set_ptw(Object *obj, bool value, Error **errp)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
+
+ cpu->ptw = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+
+ if (kvm_enabled()) {
+ /* PTW feature is only supported in TCG mode now */
+ return;
+ }
+
+ cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, HPTW, value);
+}
+
static void loongarch_cpu_post_init(Object *obj)
{
LoongArchCPU *cpu = LOONGARCH_CPU(obj);
@@ -238,7 +257,10 @@ static void loongarch_cpu_post_init(Object *obj)
loongarch_set_lasx);
object_property_add_bool(obj, "msgint", loongarch_get_msgint,
loongarch_set_msgint);
+ object_property_add_bool(obj, "ptw", loongarch_get_ptw,
+ loongarch_set_ptw);
/* lbt is enabled only in kvm mode, not supported in tcg mode */
+
if (kvm_enabled()) {
kvm_loongarch_cpu_post_init(cpu);
}
@@ -346,6 +368,7 @@ static void loongarch_la464_initfn(Object *obj)
env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);
cpu->msgint = ON_OFF_AUTO_OFF;
+ cpu->ptw = ON_OFF_AUTO_OFF;
loongarch_la464_init_csr(obj);
loongarch_cpu_post_init(obj);
}
@@ -377,6 +400,7 @@ static void loongarch_la132_initfn(Object *obj)
data = FIELD_DP32(data, CPUCFG1, CRC, 1);
env->cpucfg[1] = data;
cpu->msgint = ON_OFF_AUTO_OFF;
+ cpu->ptw = ON_OFF_AUTO_OFF;
}
static void loongarch_max_initfn(Object *obj)
@@ -388,6 +412,8 @@ static void loongarch_max_initfn(Object *obj)
if (tcg_enabled()) {
cpu->env.cpucfg[1] = FIELD_DP32(cpu->env.cpucfg[1], CPUCFG1, MSG_INT, 1);
cpu->msgint = ON_OFF_AUTO_AUTO;
+ cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, HPTW, 1);
+ cpu->ptw = ON_OFF_AUTO_AUTO;
}
}
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index b8e3b46c3a..b1d6799222 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -137,6 +137,7 @@ FIELD(CPUCFG2, LBT_MIPS, 20, 1)
FIELD(CPUCFG2, LBT_ALL, 18, 3)
FIELD(CPUCFG2, LSPW, 21, 1)
FIELD(CPUCFG2, LAM, 22, 1)
+FIELD(CPUCFG2, HPTW, 24, 1)
/* cpucfg[3] bits */
FIELD(CPUCFG3, CCDMA, 0, 1)
@@ -402,6 +403,7 @@ struct ArchCPU {
uint32_t phy_id;
OnOffAuto lbt;
OnOffAuto pmu;
+ OnOffAuto ptw;
OnOffAuto lsx;
OnOffAuto lasx;
OnOffAuto msgint;
--
2.43.5
* [PULL 02/14] target/loongarch: Add CSR_PWCH write helper function
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
2025-10-23 12:06 ` [PULL 01/14] target/loongarch: Use auto method with PTW feature Bibo Mao
@ 2025-10-23 12:06 ` Bibo Mao
2025-10-23 12:06 ` [PULL 03/14] target/loongarch: Add present and write bit with pte entry Bibo Mao
` (12 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:06 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
Bit HPTW_EN in register CSR_PWCH controls enabling the hardware page
table walker when the PTW feature is present; otherwise it is a
reserved bit.
Add a write helper function for register CSR_PWCH.
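The intended write semantics, as a short sketch using the field macros
from the diff below (illustrative only):
    /* guest writes val to CSR_PWCH */
    val = FIELD_DP64(val, CSR_PWCH, RESERVE, 0);     /* reserved bits read back as 0 */
    if (!FIELD_EX32(env->cpucfg[2], CPUCFG2, HPTW)) {
        val = FIELD_DP64(val, CSR_PWCH, HPTW_EN, 0); /* HPTW_EN only sticks with CPUCFG2.HPTW */
    }
    env->CSR_PWCH = val;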
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu-csr.h | 2 ++
target/loongarch/tcg/csr_helper.c | 15 +++++++++++++++
target/loongarch/tcg/helper.h | 1 +
.../tcg/insn_trans/trans_privileged.c.inc | 1 +
4 files changed, 19 insertions(+)
diff --git a/target/loongarch/cpu-csr.h b/target/loongarch/cpu-csr.h
index 9097fddee1..0bcb51d3a3 100644
--- a/target/loongarch/cpu-csr.h
+++ b/target/loongarch/cpu-csr.h
@@ -105,6 +105,8 @@ FIELD(CSR_PWCH, DIR3_BASE, 0, 6)
FIELD(CSR_PWCH, DIR3_WIDTH, 6, 6)
FIELD(CSR_PWCH, DIR4_BASE, 12, 6)
FIELD(CSR_PWCH, DIR4_WIDTH, 18, 6)
+FIELD(CSR_PWCH, HPTW_EN, 24, 1)
+FIELD(CSR_PWCH, RESERVE, 25, 7)
#define LOONGARCH_CSR_STLBPS 0x1e /* Stlb page size */
FIELD(CSR_STLBPS, PS, 0, 5)
diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c
index 5ebe15f993..c1a8ba3089 100644
--- a/target/loongarch/tcg/csr_helper.c
+++ b/target/loongarch/tcg/csr_helper.c
@@ -163,3 +163,18 @@ target_ulong helper_csrwr_pwcl(CPULoongArchState *env, target_ulong val)
env->CSR_PWCL = val;
return old_v;
}
+
+target_ulong helper_csrwr_pwch(CPULoongArchState *env, target_ulong val)
+{
+ uint8_t has_ptw;
+ int64_t old_v = env->CSR_PWCH;
+
+ val = FIELD_DP64(val, CSR_PWCH, RESERVE, 0);
+ has_ptw = FIELD_EX32(env->cpucfg[2], CPUCFG2, HPTW);
+ if (!has_ptw) {
+ val = FIELD_DP64(val, CSR_PWCH, HPTW_EN, 0);
+ }
+
+ env->CSR_PWCH = val;
+ return old_v;
+ }
diff --git a/target/loongarch/tcg/helper.h b/target/loongarch/tcg/helper.h
index 7e508c5a7b..8a6c62f116 100644
--- a/target/loongarch/tcg/helper.h
+++ b/target/loongarch/tcg/helper.h
@@ -107,6 +107,7 @@ DEF_HELPER_2(csrwr_asid, i64, env, tl)
DEF_HELPER_2(csrwr_tcfg, i64, env, tl)
DEF_HELPER_2(csrwr_ticlr, i64, env, tl)
DEF_HELPER_2(csrwr_pwcl, i64, env, tl)
+DEF_HELPER_2(csrwr_pwch, i64, env, tl)
DEF_HELPER_2(iocsrrd_b, i64, env, tl)
DEF_HELPER_2(iocsrrd_h, i64, env, tl)
DEF_HELPER_2(iocsrrd_w, i64, env, tl)
diff --git a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
index 64e53a4460..2094d182ac 100644
--- a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
@@ -79,6 +79,7 @@ void loongarch_csr_translate_init(void)
SET_CSR_FUNC(ASID, NULL, gen_helper_csrwr_asid);
SET_CSR_FUNC(PGD, gen_helper_csrrd_pgd, NULL);
SET_CSR_FUNC(PWCL, NULL, gen_helper_csrwr_pwcl);
+ SET_CSR_FUNC(PWCH, NULL, gen_helper_csrwr_pwch);
SET_CSR_FUNC(CPUID, gen_helper_csrrd_cpuid, NULL);
SET_CSR_FUNC(TCFG, NULL, gen_helper_csrwr_tcfg);
SET_CSR_FUNC(TVAL, gen_helper_csrrd_tval, NULL);
--
2.43.5
* [PULL 03/14] target/loongarch: Add present and write bit with pte entry
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
2025-10-23 12:06 ` [PULL 01/14] target/loongarch: Use auto method with PTW feature Bibo Mao
2025-10-23 12:06 ` [PULL 02/14] target/loongarch: Add CSR_PWCH write helper function Bibo Mao
@ 2025-10-23 12:06 ` Bibo Mao
2025-10-23 12:07 ` [PULL 04/14] target/loongarch: Add function sptw_prepare_tlb before adding tlb entry Bibo Mao
` (11 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:06 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
With the hardware PTW feature enabled, the Present bit and the Write
bit are checked by hardware rather than the Valid bit and the Dirty
bit. Bit P means that the page is valid and present, and bit W means
that the page is writable.
The original V bit is treated as an access bit: hardware sets it on a
read or write access. The D bit is updated by hardware on a write access.
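As an example, a sketch of a PTW-mode leaf PTE for a present, writable
page (P/W positions from the fields added below, V/D from the existing
TLBENTRY layout; the MAT value is an assumption, illustrative only):
    pte = (1ULL << 7)    /* P: present */
        | (1ULL << 8)    /* W: writable */
        | (1ULL << 4);   /* MAT = 1, coherent cached (assumed) */
    /* V (bit 0) and D (bit 1) start clear; hardware sets V on any
       access and D on a write access. */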
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu-csr.h | 2 ++
target/loongarch/cpu-mmu.h | 31 +++++++++++++++++++++++++++++++
target/loongarch/cpu_helper.c | 7 ++++---
target/loongarch/tcg/tlb_helper.c | 16 ++++++++--------
4 files changed, 45 insertions(+), 11 deletions(-)
diff --git a/target/loongarch/cpu-csr.h b/target/loongarch/cpu-csr.h
index 0bcb51d3a3..6898947498 100644
--- a/target/loongarch/cpu-csr.h
+++ b/target/loongarch/cpu-csr.h
@@ -70,6 +70,8 @@ FIELD(TLBENTRY, PLV, 2, 2)
FIELD(TLBENTRY, MAT, 4, 2)
FIELD(TLBENTRY, G, 6, 1)
FIELD(TLBENTRY, HUGE, 6, 1)
+FIELD(TLBENTRY, P, 7, 1)
+FIELD(TLBENTRY, W, 8, 1)
FIELD(TLBENTRY, HGLOBAL, 12, 1)
FIELD(TLBENTRY, LEVEL, 13, 2)
FIELD(TLBENTRY_32, PPN, 8, 24)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index dbc69c7c0f..4ba82a7f81 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -27,6 +27,37 @@ typedef struct MMUContext {
int prot;
} MMUContext;
+static inline bool cpu_has_ptw(CPULoongArchState *env)
+{
+ return !!FIELD_EX64(env->CSR_PWCH, CSR_PWCH, HPTW_EN);
+}
+
+static inline bool pte_present(CPULoongArchState *env, uint64_t entry)
+{
+ uint8_t present;
+
+ if (cpu_has_ptw(env)) {
+ present = FIELD_EX64(entry, TLBENTRY, P);
+ } else {
+ present = FIELD_EX64(entry, TLBENTRY, V);
+ }
+
+ return !!present;
+}
+
+static inline bool pte_write(CPULoongArchState *env, uint64_t entry)
+{
+ uint8_t writable;
+
+ if (cpu_has_ptw(env)) {
+ writable = FIELD_EX64(entry, TLBENTRY, W);
+ } else {
+ writable = FIELD_EX64(entry, TLBENTRY, D);
+ }
+
+ return !!writable;
+}
+
bool check_ps(CPULoongArchState *ent, uint8_t ps);
TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
MMUAccessType access_type, int mmu_idx);
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 867e7c8867..5165c44c7d 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -49,12 +49,13 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
{
uint64_t plv = mmu_idx;
uint64_t tlb_entry, tlb_ppn;
- uint8_t tlb_ps, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
+ uint8_t tlb_ps, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
+ bool tlb_v, tlb_d;
tlb_entry = context->pte;
tlb_ps = context->ps;
- tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
- tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
+ tlb_v = pte_present(env, tlb_entry);
+ tlb_d = pte_write(env, tlb_entry);
tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
if (is_la64(env)) {
tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index e119f78d92..bc89a4d5ee 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -114,9 +114,8 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
uint8_t tlb_ps;
LoongArchTLB *tlb = &env->tlb[index];
int idxmap = BIT(MMU_KERNEL_IDX) | BIT(MMU_USER_IDX);
- uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
- uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
+ bool tlb_v;
tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
@@ -124,12 +123,14 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;
addr = sextract64(addr, 0, TARGET_VIRT_ADDR_SPACE_BITS);
- if (tlb_v0) {
+ tlb_v = pte_present(env, tlb->tlb_entry0);
+ if (tlb_v) {
tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
idxmap, TARGET_LONG_BITS);
}
- if (tlb_v1) {
+ tlb_v = pte_present(env, tlb->tlb_entry1);
+ if (tlb_v) {
tlb_flush_range_by_mmuidx(env_cpu(env), addr + pagesize, pagesize,
idxmap, TARGET_LONG_BITS);
}
@@ -335,8 +336,7 @@ void helper_tlbwr(CPULoongArchState *env)
{
int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
LoongArchTLB *old, new = {};
- bool skip_inv = false;
- uint8_t tlb_v0, tlb_v1;
+ bool skip_inv = false, tlb_v0, tlb_v1;
old = env->tlb + index;
if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
@@ -348,8 +348,8 @@ void helper_tlbwr(CPULoongArchState *env)
/* Check whether ASID/VPPN is the same */
if (old->tlb_misc == new.tlb_misc) {
/* Check whether both even/odd pages is the same or invalid */
- tlb_v0 = FIELD_EX64(old->tlb_entry0, TLBENTRY, V);
- tlb_v1 = FIELD_EX64(old->tlb_entry1, TLBENTRY, V);
+ tlb_v0 = pte_present(env, old->tlb_entry0);
+ tlb_v1 = pte_present(env, old->tlb_entry1);
if ((!tlb_v0 || new.tlb_entry0 == old->tlb_entry0) &&
(!tlb_v1 || new.tlb_entry1 == old->tlb_entry1)) {
skip_inv = true;
--
2.43.5
* [PULL 04/14] target/loongarch: Add function sptw_prepare_tlb before adding tlb entry
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (2 preceding siblings ...)
2025-10-23 12:06 ` [PULL 03/14] target/loongarch: Add present and write bit with pte entry Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 05/14] target/loongarch: target/loongarch: Add common function get_tlb_random_index() Bibo Mao
` (10 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
With the software page table walker, the TLB entry comes from CSR
registers. However, with the hardware page table walker, the TLB entry
comes directly from the page table entry, and the TLB CSR registers
are not necessary.
Add function sptw_prepare_context() to gather the TLB entry
information from the TLB CSR registers.
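After this change the MMUContext carries everything needed to build
one TLB entry; a sketch of the fields filled by sptw_prepare_context()
(as in the diff below):
    context.addr         = csr_vppn << R_TLB_MISC_VPPN_SHIFT;  /* virtual page pair base */
    context.ps           = csr_ps;                             /* page size shift */
    context.pte_buddy[0] = lo0;                                /* even page PTE */
    context.pte_buddy[1] = lo1;                                /* odd page PTE */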
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu-mmu.h | 1 +
target/loongarch/tcg/tlb_helper.c | 23 +++++++++++++++++++++--
2 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 4ba82a7f81..aa43e57128 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -25,6 +25,7 @@ typedef struct MMUContext {
hwaddr physical;
int ps; /* page size shift */
int prot;
+ uint64_t pte_buddy[2];
} MMUContext;
static inline bool cpu_has_ptw(CPULoongArchState *env)
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index bc89a4d5ee..f42bbcde67 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -159,10 +159,10 @@ static void invalidate_tlb(CPULoongArchState *env, int index)
invalidate_tlb_entry(env, index);
}
-static void fill_tlb_entry(CPULoongArchState *env, LoongArchTLB *tlb)
+/* Prepare tlb entry information in software PTW mode */
+static void sptw_prepare_context(CPULoongArchState *env, MMUContext *context)
{
uint64_t lo0, lo1, csr_vppn;
- uint16_t csr_asid;
uint8_t csr_ps;
if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
@@ -185,6 +185,25 @@ static void fill_tlb_entry(CPULoongArchState *env, LoongArchTLB *tlb)
lo1 = env->CSR_TLBELO1;
}
+ context->ps = csr_ps;
+ context->addr = csr_vppn << R_TLB_MISC_VPPN_SHIFT;
+ context->pte_buddy[0] = lo0;
+ context->pte_buddy[1] = lo1;
+}
+
+static void fill_tlb_entry(CPULoongArchState *env, LoongArchTLB *tlb)
+{
+ uint64_t lo0, lo1, csr_vppn;
+ uint16_t csr_asid;
+ uint8_t csr_ps;
+ MMUContext context;
+
+ sptw_prepare_context(env, &context);
+ csr_vppn = context.addr >> R_TLB_MISC_VPPN_SHIFT;
+ csr_ps = context.ps;
+ lo0 = context.pte_buddy[0];
+ lo1 = context.pte_buddy[1];
+
/* Store page size in field PS */
tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
--
2.43.5
* [PULL 05/14] target/loongarch: target/loongarch: Add common function get_tlb_random_index()
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (3 preceding siblings ...)
2025-10-23 12:07 ` [PULL 04/14] target/loongarch: Add function sptw_prepare_tlb before adding tlb entry Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 06/14] target/loongarch: Add MMUContext parameter in fill_tlb_entry() Bibo Mao
` (9 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
With the software PTW, the TLB index is calculated randomly when a new
TLB entry is added. Hardware PTW uses the same logic when adding a new
TLB entry.
Add common function get_tlb_random_index() to get a random TLB index
when adding a new TLB entry.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/tcg/tlb_helper.c | 39 +++++++++++++++++++------------
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index f42bbcde67..dd1a92d28d 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -383,31 +383,21 @@ void helper_tlbwr(CPULoongArchState *env)
*old = new;
}
-void helper_tlbfill(CPULoongArchState *env)
+static int get_tlb_random_index(CPULoongArchState *env, vaddr addr,
+ int pagesize)
{
- uint64_t address, entryhi;
+ uint64_t address;
int index, set, i, stlb_idx;
- uint16_t pagesize, stlb_ps;
- uint16_t asid, tlb_asid;
+ uint16_t asid, tlb_asid, stlb_ps;
LoongArchTLB *tlb;
uint8_t tlb_e, tlb_g;
- if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
- entryhi = env->CSR_TLBREHI;
- /* Validity of pagesize is checked in helper_ldpte() */
- pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
- } else {
- entryhi = env->CSR_TLBEHI;
- /* Validity of pagesize is checked in helper_tlbrd() */
- pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
- }
-
/* Validity of stlb_ps is checked in helper_csrwr_stlbps() */
stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
if (pagesize == stlb_ps) {
/* Only write into STLB bits [47:13] */
- address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);
+ address = addr & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);
set = -1;
stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */
for (i = 0; i < 8; ++i) {
@@ -454,6 +444,25 @@ void helper_tlbfill(CPULoongArchState *env)
}
}
+ return index;
+}
+
+void helper_tlbfill(CPULoongArchState *env)
+{
+ vaddr entryhi;
+ int index, pagesize;
+
+ if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
+ entryhi = env->CSR_TLBREHI;
+ /* Validity of pagesize is checked in helper_ldpte() */
+ pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
+ } else {
+ entryhi = env->CSR_TLBEHI;
+ /* Validity of pagesize is checked in helper_tlbrd() */
+ pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
+ }
+
+ index = get_tlb_random_index(env, entryhi, pagesize);
invalidate_tlb(env, index);
fill_tlb_entry(env, env->tlb + index);
}
--
2.43.5
* [PULL 06/14] target/loongarch: Add MMUContext parameter in fill_tlb_entry()
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (4 preceding siblings ...)
2025-10-23 12:07 ` [PULL 05/14] target/loongarch: target/loongarch: Add common function get_tlb_random_index() Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 07/14] target/loongarch: Add debug parameter with loongarch_page_table_walker() Bibo Mao
` (8 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
Function fill_tlb_entry() can be reused by hardware PTW in the future,
so add an MMUContext input parameter to fill_tlb_entry().
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/tcg/tlb_helper.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index dd1a92d28d..cdde721a21 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -191,18 +191,17 @@ static void sptw_prepare_context(CPULoongArchState *env, MMUContext *context)
context->pte_buddy[1] = lo1;
}
-static void fill_tlb_entry(CPULoongArchState *env, LoongArchTLB *tlb)
+static void fill_tlb_entry(CPULoongArchState *env, LoongArchTLB *tlb,
+ MMUContext *context)
{
uint64_t lo0, lo1, csr_vppn;
uint16_t csr_asid;
uint8_t csr_ps;
- MMUContext context;
- sptw_prepare_context(env, &context);
- csr_vppn = context.addr >> R_TLB_MISC_VPPN_SHIFT;
- csr_ps = context.ps;
- lo0 = context.pte_buddy[0];
- lo1 = context.pte_buddy[1];
+ csr_vppn = context->addr >> R_TLB_MISC_VPPN_SHIFT;
+ csr_ps = context->ps;
+ lo0 = context->pte_buddy[0];
+ lo1 = context->pte_buddy[1];
/* Store page size in field PS */
tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
@@ -356,6 +355,7 @@ void helper_tlbwr(CPULoongArchState *env)
int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
LoongArchTLB *old, new = {};
bool skip_inv = false, tlb_v0, tlb_v1;
+ MMUContext context;
old = env->tlb + index;
if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
@@ -363,7 +363,8 @@ void helper_tlbwr(CPULoongArchState *env)
return;
}
- fill_tlb_entry(env, &new);
+ sptw_prepare_context(env, &context);
+ fill_tlb_entry(env, &new, &context);
/* Check whether ASID/VPPN is the same */
if (old->tlb_misc == new.tlb_misc) {
/* Check whether both even/odd pages is the same or invalid */
@@ -451,6 +452,7 @@ void helper_tlbfill(CPULoongArchState *env)
{
vaddr entryhi;
int index, pagesize;
+ MMUContext context;
if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
entryhi = env->CSR_TLBREHI;
@@ -462,9 +464,10 @@ void helper_tlbfill(CPULoongArchState *env)
pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
}
+ sptw_prepare_context(env, &context);
index = get_tlb_random_index(env, entryhi, pagesize);
invalidate_tlb(env, index);
- fill_tlb_entry(env, env->tlb + index);
+ fill_tlb_entry(env, env->tlb + index, &context);
}
void helper_tlbclr(CPULoongArchState *env)
--
2.43.5
* [PULL 07/14] target/loongarch: Add debug parameter with loongarch_page_table_walker()
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (5 preceding siblings ...)
2025-10-23 12:07 ` [PULL 06/14] target/loongarch: Add MMUContext parameter in fill_tlb_entry() Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 08/14] target/loongarch: Reserve higher 48 bit PTE attribute with huge page Bibo Mao
` (7 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
Add a debug parameter to function loongarch_page_table_walker(); in
debug mode it only retrieves the physical address. It will also be
used by the upcoming HW PTW support, where the dirty and access bits
are updated in HW PTW mode.
Also rename function loongarch_page_table_walker() to loongarch_ptw()
for brevity.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu_helper.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 5165c44c7d..8af6ee7fb1 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -106,9 +106,8 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
return TLBRET_MATCH;
}
-static TLBRet loongarch_page_table_walker(CPULoongArchState *env,
- MMUContext *context,
- int access_type, int mmu_idx)
+static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
+ int access_type, int mmu_idx, int debug)
{
CPUState *cs = env_cpu(env);
target_ulong index, phys;
@@ -184,7 +183,7 @@ static TLBRet loongarch_map_address(CPULoongArchState *env,
* legal mapping, even if the mapping is not yet in TLB. return 0 if
* there is a valid map, else none zero.
*/
- return loongarch_page_table_walker(env, context, access_type, mmu_idx);
+ return loongarch_ptw(env, context, access_type, mmu_idx, is_debug);
}
return TLBRET_NOMATCH;
--
2.43.5
* [PULL 08/14] target/loongarch: Reserve higher 48 bit PTE attribute with huge page
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (6 preceding siblings ...)
2025-10-23 12:07 ` [PULL 07/14] target/loongarch: Add debug parameter with loongarch_page_table_walker() Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 09/14] target/loongarch: Move last PTE lookup into page table walker loop Bibo Mao
` (6 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
In a PTE entry, the high bits 48-63 are valid HW bits for PTE
attributes; for example, bit 63 is RPLV and bit 62 is NX. In a page
directory table entry, the value is the physical address of the next
page table level from the HW's point of view, so the high bits 48-63
need to be discarded.
Preserve the high bits 48-63 for a huge page, since it is a PTE entry,
and discard them only for a page directory table entry.
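A sketch of the resulting rule in the walker (names as in the diff
below; illustrative only):
    base = ldq_phys(cs->as, phys);
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /* leaf/huge pte: keep bits 48-63 (e.g. NX at bit 62, RPLV at bit 63) */
    } else {
        base &= TARGET_PHYS_MASK;   /* page directory entry: just a physical address */
    }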
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu_helper.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 8af6ee7fb1..8388bfb782 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -134,10 +134,13 @@ static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
/* get next level page directory */
index = (address >> dir_base) & ((1 << dir_width) - 1);
phys = base | index << 3;
- base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
+ base = ldq_phys(cs->as, phys);
if (FIELD_EX64(base, TLBENTRY, HUGE)) {
/* base is a huge pte */
break;
+ } else {
+ /* Discard high bits with page directory table */
+ base &= TARGET_PHYS_MASK;
}
}
--
2.43.5
* [PULL 09/14] target/loongarch: Move last PTE lookup into page table walker loop
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (7 preceding siblings ...)
2025-10-23 12:07 ` [PULL 08/14] target/loongarch: Reserve higher 48 bit PTE attribute with huge page Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 10/14] target/loongarch: Add field tlb_index to record TLB search info Bibo Mao
` (5 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
The last PTE lookup step is very similar to the page table walker
loop, so move it into the loop.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu_helper.c | 24 ++++++++++--------------
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 8388bfb782..520fd74b2b 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -124,7 +124,7 @@ static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
}
base &= TARGET_PHYS_MASK;
- for (level = 4; level > 0; level--) {
+ for (level = 4; level >= 0; level--) {
get_dir_base_width(env, &dir_base, &dir_width, level);
if (dir_width == 0) {
@@ -135,17 +135,19 @@ static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
index = (address >> dir_base) & ((1 << dir_width) - 1);
phys = base | index << 3;
base = ldq_phys(cs->as, phys);
- if (FIELD_EX64(base, TLBENTRY, HUGE)) {
- /* base is a huge pte */
- break;
- } else {
- /* Discard high bits with page directory table */
- base &= TARGET_PHYS_MASK;
+ if (level) {
+ if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+ /* base is a huge pte */
+ break;
+ } else {
+ /* Discard high bits with page directory table */
+ base &= TARGET_PHYS_MASK;
+ }
}
}
/* pte */
- if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+ if (level > 0) {
/* Huge Page. base is pte */
base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
@@ -153,12 +155,6 @@ static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
base = FIELD_DP64(base, TLBENTRY, G, 1);
}
- } else {
- /* Normal Page. base points to pte */
- get_dir_base_width(env, &dir_base, &dir_width, 0);
- index = (address >> dir_base) & ((1 << dir_width) - 1);
- phys = base | index << 3;
- base = ldq_phys(cs->as, phys);
}
context->ps = dir_base;
--
2.43.5
* [PULL 10/14] target/loongarch: Add field tlb_index to record TLB search info
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (8 preceding siblings ...)
2025-10-23 12:07 ` [PULL 09/14] target/loongarch: Move last PTE lookup into page table walker loop Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 11/14] target/loongarch: Add common interface update_tlb_index() Bibo Mao
` (4 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
With the hardware PTW function, the TLB is searched first. One TLB
entry holds an odd/even page pair, so the odd page may be valid while
the even page is empty. When software accesses memory with an address
in the even page, hardware PTW happens and fills the mapping in the
same TLB entry.
Add field tlb_index to record the TLB index when searching the TLB
tables.
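The even/odd half of a TLB entry is selected by the bit just above the
page size, as in loongarch_map_tlb_entry() below (sketch):
    n = (context->addr >> tlb_ps) & 0x1;   /* 0: even page, 1: odd page */
    context->pte = n ? tlb->tlb_entry1 : tlb->tlb_entry0;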
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu-mmu.h | 2 ++
target/loongarch/cpu_helper.c | 3 +++
target/loongarch/tcg/tlb_helper.c | 1 +
3 files changed, 6 insertions(+)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index aa43e57128..3d6ae6cf2c 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -25,6 +25,8 @@ typedef struct MMUContext {
hwaddr physical;
int ps; /* page size shift */
int prot;
+ int tlb_index;
+ int mmu_index;
uint64_t pte_buddy[2];
} MMUContext;
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 520fd74b2b..caad357adf 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -97,6 +97,7 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
context->physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
(context->addr & MAKE_64BIT_MASK(0, tlb_ps));
context->prot = PAGE_READ;
+ context->mmu_index = tlb_plv;
if (tlb_d) {
context->prot |= PAGE_WRITE;
}
@@ -216,6 +217,7 @@ TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
if (da & !pg) {
context->physical = address & TARGET_PHYS_MASK;
context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ context->mmu_index = MMU_DA_IDX;
return TLBRET_MATCH;
}
@@ -235,6 +237,7 @@ TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
context->physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ context->mmu_index = MMU_DA_IDX;
return TLBRET_MATCH;
}
}
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index cdde721a21..8d962ce3e3 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -746,6 +746,7 @@ static TLBRet loongarch_map_tlb_entry(CPULoongArchState *env,
n = (context->addr >> tlb_ps) & 0x1;/* Odd or even */
context->pte = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
context->ps = tlb_ps;
+ context->tlb_index = index;
return loongarch_check_pte(env, context, access_type, mmu_idx);
}
--
2.43.5
* [PULL 11/14] target/loongarch: Add common interface update_tlb_index()
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (9 preceding siblings ...)
2025-10-23 12:07 ` [PULL 10/14] target/loongarch: Add field tlb_index to record TLB search info Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 12/14] target/loongarch: Add basic hardware PTW support Bibo Mao
` (3 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
Add common API update_tlb_index() to update the TLB entry at a
specified index. It is called by helper_tlbwr() now, and it can also
be used by HW PTW when adding a new TLB entry.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/tcg/tlb_helper.c | 27 +++++++++++++++++----------
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index 8d962ce3e3..92f89841b0 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -350,21 +350,14 @@ void helper_tlbrd(CPULoongArchState *env)
}
}
-void helper_tlbwr(CPULoongArchState *env)
+static void update_tlb_index(CPULoongArchState *env, MMUContext *context,
+ int index)
{
- int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
LoongArchTLB *old, new = {};
bool skip_inv = false, tlb_v0, tlb_v1;
- MMUContext context;
old = env->tlb + index;
- if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
- invalidate_tlb(env, index);
- return;
- }
-
- sptw_prepare_context(env, &context);
- fill_tlb_entry(env, &new, &context);
+ fill_tlb_entry(env, &new, context);
/* Check whether ASID/VPPN is the same */
if (old->tlb_misc == new.tlb_misc) {
/* Check whether both even/odd pages is the same or invalid */
@@ -384,6 +377,20 @@ void helper_tlbwr(CPULoongArchState *env)
*old = new;
}
+void helper_tlbwr(CPULoongArchState *env)
+{
+ int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
+ MMUContext context;
+
+ if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
+ invalidate_tlb(env, index);
+ return;
+ }
+
+ sptw_prepare_context(env, &context);
+ update_tlb_index(env, &context, index);
+}
+
static int get_tlb_random_index(CPULoongArchState *env, vaddr addr,
int pagesize)
{
--
2.43.5
* [PULL 12/14] target/loongarch: Add basic hardware PTW support
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (10 preceding siblings ...)
2025-10-23 12:07 ` [PULL 11/14] target/loongarch: Add common interface update_tlb_index() Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 13/14] target/loongarch: Update matched ptw bit A/D with PTW supported Bibo Mao
` (2 subsequent siblings)
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
With hardware PTW supported, hardware walks the page table on a TLB
miss. Hardware PTW also happens when there is no TLB miss but the
Present bit is not set. This can occur because one TLB entry holds an
odd/even page pair on the LoongArch system: for example, initially the
odd TLB entry is valid and the even TLB entry is 0. When software
accesses an address within the even page, there is no TLB miss, only a
zero TLB entry. In this condition, hardware PTW happens as well.
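The resulting tlb_fill flow, as a rough sketch of the logic in the
diff below:
    ret = get_physical_address(env, &context, access_type, mmu_idx, 0);
    if (ret != TLBRET_MATCH && cpu_has_ptw(env)) {
        if (ret == TLBRET_NOMATCH || ret == TLBRET_INVALID) {
            ret = loongarch_ptw(env, &context, access_type, mmu_idx, 0);
            if (ret == TLBRET_MATCH) {
                /* reuse the matched slot, or pick a random index */
                ptw_update_tlb(env, &context);
            }
        }
    }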
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu-mmu.h | 2 ++
target/loongarch/cpu_helper.c | 17 ++++++++++++++---
target/loongarch/tcg/tlb_helper.c | 25 +++++++++++++++++++++++++
3 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 3d6ae6cf2c..158bb61429 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -67,6 +67,8 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
MMUAccessType access_type, int mmu_idx,
int is_debug);
+TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
+ int access_type, int mmu_idx, int debug);
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
uint64_t *dir_width, unsigned int level);
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index caad357adf..55efe44cb4 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -107,11 +107,11 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
return TLBRET_MATCH;
}
-static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
- int access_type, int mmu_idx, int debug)
+TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
+ int access_type, int mmu_idx, int debug)
{
CPUState *cs = env_cpu(env);
- target_ulong index, phys;
+ target_ulong index = 0, phys = 0;
uint64_t dir_base, dir_width;
uint64_t base;
int level;
@@ -139,6 +139,8 @@ static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
if (level) {
if (FIELD_EX64(base, TLBENTRY, HUGE)) {
/* base is a huge pte */
+ index = 0;
+ dir_base -= 1;
break;
} else {
/* Discard high bits with page directory table */
@@ -156,6 +158,15 @@ static TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
base = FIELD_DP64(base, TLBENTRY, G, 1);
}
+
+ context->pte_buddy[index] = base;
+ context->pte_buddy[1 - index] = base + BIT_ULL(dir_base);
+ base += (BIT_ULL(dir_base) & address);
+ } else if (cpu_has_ptw(env)) {
+ index &= 1;
+ context->pte_buddy[index] = base;
+ context->pte_buddy[1 - index] = ldq_phys(cs->as,
+ phys + 8 * (1 - 2 * index));
}
context->ps = dir_base;
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index 92f89841b0..1f3aaaa41d 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -601,6 +601,18 @@ void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
}
}
+static void ptw_update_tlb(CPULoongArchState *env, MMUContext *context)
+{
+ int index;
+
+ index = context->tlb_index;
+ if (index < 0) {
+ index = get_tlb_random_index(env, context->addr, context->ps);
+ }
+
+ update_tlb_index(env, context, index);
+}
+
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -613,7 +625,20 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* Data access */
context.addr = address;
+ context.tlb_index = -1;
ret = get_physical_address(env, &context, access_type, mmu_idx, 0);
+ if (ret != TLBRET_MATCH && cpu_has_ptw(env)) {
+ /* Take HW PTW if TLB missed or bit P is zero */
+ if (ret == TLBRET_NOMATCH || ret == TLBRET_INVALID) {
+ ret = loongarch_ptw(env, &context, access_type, mmu_idx, 0);
+ if (ret == TLBRET_MATCH) {
+ ptw_update_tlb(env, &context);
+ }
+ } else if (context.tlb_index >= 0) {
+ invalidate_tlb(env, context.tlb_index);
+ }
+ }
+
if (ret == TLBRET_MATCH) {
physical = context.physical;
prot = context.prot;
--
2.43.5
* [PULL 13/14] target/loongarch: Update matched ptw bit A/D with PTW supported
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (11 preceding siblings ...)
2025-10-23 12:07 ` [PULL 12/14] target/loongarch: Add basic hardware PTW support Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 12:07 ` [PULL 14/14] target/loongarch: Add bit A/D checking in TLB entry " Bibo Mao
2025-10-23 19:33 ` [PULL 00/14] loongarch queue Richard Henderson
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
With hardware PTW supported, bit A is set on a read access or an
instruction fetch, and bit D is set on a write access.
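The update rule implemented below can be summarised as (sketch, using
the helpers added in this patch):
    new = pte_mkaccess(pte);            /* any access sets A (stored in the V position) */
    if (access_type == MMU_DATA_STORE) {
        new = pte_mkdirty(new);         /* a write additionally sets D */
    }
    /* the PTE is updated with an atomic cmpxchg; the walk is retried on a race */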
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/cpu-mmu.h | 26 ++++++++++
target/loongarch/cpu_helper.c | 93 ++++++++++++++++++++++++++++++++++-
2 files changed, 117 insertions(+), 2 deletions(-)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 158bb61429..2259de9d36 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -61,6 +61,32 @@ static inline bool pte_write(CPULoongArchState *env, uint64_t entry)
return !!writable;
}
+/*
+ * The following functions should be called with PTW enable checked.
+ * With hardware PTW enabled:
+ * Bit D will be set by hardware on a write access
+ * Bit A will be set by hardware on a read/instruction fetch access
+ */
+static inline uint64_t pte_mkaccess(uint64_t entry)
+{
+ return FIELD_DP64(entry, TLBENTRY, V, 1);
+}
+
+static inline uint64_t pte_mkdirty(uint64_t entry)
+{
+ return FIELD_DP64(entry, TLBENTRY, D, 1);
+}
+
+static inline bool pte_access(uint64_t entry)
+{
+ return !!FIELD_EX64(entry, TLBENTRY, V);
+}
+
+static inline bool pte_dirty(uint64_t entry)
+{
+ return !!FIELD_EX64(entry, TLBENTRY, D);
+}
+
bool check_ps(CPULoongArchState *ent, uint8_t ps);
TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
MMUAccessType access_type, int mmu_idx);
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 55efe44cb4..a6eba4f416 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -107,15 +107,52 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
return TLBRET_MATCH;
}
+static MemTxResult loongarch_cmpxchg_phys(CPUState *cs, hwaddr phys,
+ uint64_t old, uint64_t new)
+{
+ hwaddr addr1, l = 8;
+ MemoryRegion *mr;
+ uint8_t *ram_ptr;
+ uint64_t old1;
+ MemTxResult ret;
+
+ rcu_read_lock();
+ mr = address_space_translate(cs->as, phys, &addr1, &l,
+ false, MEMTXATTRS_UNSPECIFIED);
+ if (!memory_region_is_ram(mr)) {
+ /*
+ * Misconfigured PTE in ROM (AD bits are not preset) or
+ * PTE is in IO space and can't be updated atomically.
+ */
+ rcu_read_unlock();
+ return MEMTX_ACCESS_ERROR;
+ }
+
+ ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ old1 = qatomic_cmpxchg((uint64_t *)ram_ptr, cpu_to_le64(old),
+ cpu_to_le64(new));
+ old1 = le64_to_cpu(old1);
+ if (old1 == old) {
+ ret = MEMTX_OK;
+ } else {
+ ret = MEMTX_DECODE_ERROR;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
int access_type, int mmu_idx, int debug)
{
CPUState *cs = env_cpu(env);
target_ulong index = 0, phys = 0;
uint64_t dir_base, dir_width;
- uint64_t base;
+ uint64_t base, pte;
int level;
vaddr address;
+ TLBRet ret;
+ MemTxResult ret1;
address = context->addr;
if ((address >> 63) & 0x1) {
@@ -149,7 +186,9 @@ TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
}
}
+restart:
/* pte */
+ pte = base;
if (level > 0) {
/* Huge Page. base is pte */
base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
@@ -171,7 +210,57 @@ TLBRet loongarch_ptw(CPULoongArchState *env, MMUContext *context,
context->ps = dir_base;
context->pte = base;
- return loongarch_check_pte(env, context, access_type, mmu_idx);
+ ret = loongarch_check_pte(env, context, access_type, mmu_idx);
+ if (debug) {
+ return ret;
+ }
+
+ /*
+ * Update bit A/D with hardware PTW supported
+ *
+ * Need atomic cmpxchg operation with pte update, other vCPUs may
+ * update pte at the same time.
+ */
+ if (ret == TLBRET_MATCH && cpu_has_ptw(env)) {
+ if (access_type == MMU_DATA_STORE && pte_dirty(base)) {
+ return ret;
+ }
+
+ if (access_type != MMU_DATA_STORE && pte_access(base)) {
+ return ret;
+ }
+
+ base = pte_mkaccess(pte);
+ if (access_type == MMU_DATA_STORE) {
+ base = pte_mkdirty(base);
+ }
+ ret1 = loongarch_cmpxchg_phys(cs, phys, pte, base);
+ /* PTE updated by other CPU, reload PTE entry */
+ if (ret1 == MEMTX_DECODE_ERROR) {
+ base = ldq_phys(cs->as, phys);
+ goto restart;
+ }
+
+ base = context->pte_buddy[index];
+ base = pte_mkaccess(base);
+ if (access_type == MMU_DATA_STORE) {
+ base = pte_mkdirty(base);
+ }
+ context->pte_buddy[index] = base;
+
+ /* Bits A/D need to be updated for both even/odd pages with a huge pte */
+ if (level > 0) {
+ index = 1 - index;
+ base = context->pte_buddy[index];
+ base = pte_mkaccess(base);
+ if (access_type == MMU_DATA_STORE) {
+ base = pte_mkdirty(base);
+ }
+ context->pte_buddy[index] = base;
+ }
+ }
+
+ return ret;
}
static TLBRet loongarch_map_address(CPULoongArchState *env,
--
2.43.5
* [PULL 14/14] target/loongarch: Add bit A/D checking in TLB entry with PTW supported
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (12 preceding siblings ...)
2025-10-23 12:07 ` [PULL 13/14] target/loongarch: Update matched ptw bit A/D with PTW supported Bibo Mao
@ 2025-10-23 12:07 ` Bibo Mao
2025-10-23 19:33 ` [PULL 00/14] loongarch queue Richard Henderson
14 siblings, 0 replies; 18+ messages in thread
From: Bibo Mao @ 2025-10-23 12:07 UTC (permalink / raw)
To: qemu-devel; +Cc: Song Gao
On a read/write access, add bit A/D checking if hardware PTW is
supported. If the bits do not match the access, a hardware page table
walk is performed; bits A/D are then updated in the PTE entry and the
TLB entry is updated as well.
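The check added below, applied only to a TLB match outside direct
address mode with PTW enabled, boils down to (sketch):
    if (access_type == MMU_DATA_STORE ? !pte_dirty(context.pte)
                                      : !pte_access(context.pte)) {
        ret = TLBRET_NOMATCH;   /* force a hardware walk to set A/D and refill the TLB */
    }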
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/tcg/tlb_helper.c | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index 1f3aaaa41d..01e0a27f0b 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -627,6 +627,31 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
context.addr = address;
context.tlb_index = -1;
ret = get_physical_address(env, &context, access_type, mmu_idx, 0);
+ if (ret == TLBRET_MATCH && context.mmu_index != MMU_DA_IDX
+ && cpu_has_ptw(env)) {
+ bool need_update = true;
+
+ if (access_type == MMU_DATA_STORE && pte_dirty(context.pte)) {
+ need_update = false;
+ } else if (access_type != MMU_DATA_STORE && pte_access(context.pte)) {
+ need_update = false;
+
+ /*
+ * FIXME: should context.prot be set without PAGE_WRITE with
+ * pte_write(context.pte) && !pte_dirty(context.pte)??
+ *
+ * Otherwise there will be no loongarch_cpu_tlb_fill() function call
+ * for MMU_DATA_STORE access_type in future since QEMU TLB with
+ * prot PAGE_WRITE is added already
+ */
+ }
+
+ if (need_update) {
+ /* Need update bit A/D in PTE entry, take PTW again */
+ ret = TLBRET_NOMATCH;
+ }
+ }
+
if (ret != TLBRET_MATCH && cpu_has_ptw(env)) {
/* Take HW PTW if TLB missed or bit P is zero */
if (ret == TLBRET_NOMATCH || ret == TLBRET_INVALID) {
--
2.43.5
* Re: [PULL 00/14] loongarch queue
2025-10-23 12:06 [PULL 00/14] loongarch queue Bibo Mao
` (13 preceding siblings ...)
2025-10-23 12:07 ` [PULL 14/14] target/loongarch: Add bit A/D checking in TLB entry " Bibo Mao
@ 2025-10-23 19:33 ` Richard Henderson
14 siblings, 0 replies; 18+ messages in thread
From: Richard Henderson @ 2025-10-23 19:33 UTC (permalink / raw)
To: qemu-devel
On 10/23/25 07:06, Bibo Mao wrote:
> The following changes since commit c0e80879c876cbe4cbde43a92403329bcedf2ba0:
>
> Merge tag 'pull-vfio-20251022' of https://github.com/legoater/qemu into staging (2025-10-22 08:01:21 -0500)
>
> are available in the Git repository at:
>
> https://github.com/bibo-mao/qemu.git tags/pull-loongarch-20251023
>
> for you to fetch changes up to 79ff2eee9a377f654ed0c3533a0874a0e7d6226d:
>
> target/loongarch: Add bit A/D checking in TLB entry with PTW supported (2025-10-23 19:43:48 +0800)
>
> ----------------------------------------------------------------
> pull-loongarch-20251023 queue
Applied, thanks. Please update https://wiki.qemu.org/ChangeLog/10.2 as appropriate.
r~