* [PATCH 0/2] target/loongarch: Fix NX enforcement for PTW helpers
@ 2026-03-05 13:53 Andrew S. Rightenburg via qemu development
2026-03-05 13:54 ` [PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE Andrew S. Rightenburg via qemu development
2026-03-05 13:54 ` [PATCH 2/2] target/loongarch: Avoid recursive PNX exception on CSR_BADI fetch Andrew S. Rightenburg via qemu development
0 siblings, 2 replies; 5+ messages in thread
From: Andrew S. Rightenburg via qemu development @ 2026-03-05 13:53 UTC (permalink / raw)
To: qemu-devel; +Cc: gaosong, rail5
From: rail5 <andrew@rail5.org>
The LoongArch ISA defines NX at bit 62 in the page table entry. Under TCG,
NX is checked during translation, but the software page-walk helpers (LDDIR /
LDPTE) were masking the whole PTE value with the PALEN mask. This clears
upper permission bits (including NX), allowing execution from NX mappings.
Fix this by masking only the PPN/address field and preserving permission bits.
Once NX is enforced, instruction fetches from NX pages correctly raise PNX,
but taking PNX could end up looping because loongarch_cpu_do_interrupt() tried
to fetch the faulting instruction to populate CSR_BADI, which faults with PNX
again. Treat PNX like other instruction-fetch exceptions and skip the CSR_BADI
fetch.
Reported at: https://gitlab.com/qemu-project/qemu/-/issues/3319
Tested with a Linux guest by mapping a page RW, writing a single instruction,
calling mprotect(PROT_READ) (no exec), and then calling through a function pointer.
With this series applied the guest receives SIGSEGV instead of executing or
hanging.
rail5 (2):
target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE
target/loongarch: Avoid recursive PNX exception on CSR_BADI fetch
target/loongarch/tcg/tcg_cpu.c | 2 +-
target/loongarch/tcg/tlb_helper.c | 29 +++++++++++++++++++++++++----
2 files changed, 26 insertions(+), 5 deletions(-)
--
2.47.3
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE
2026-03-05 13:53 [PATCH 0/2] target/loongarch: Fix NX enforcement for PTW helpers Andrew S. Rightenburg via qemu development
@ 2026-03-05 13:54 ` Andrew S. Rightenburg via qemu development
2026-03-06 4:05 ` Bibo Mao
2026-03-05 13:54 ` [PATCH 2/2] target/loongarch: Avoid recursive PNX exception on CSR_BADI fetch Andrew S. Rightenburg via qemu development
1 sibling, 1 reply; 5+ messages in thread
From: Andrew S. Rightenburg via qemu development @ 2026-03-05 13:54 UTC (permalink / raw)
To: qemu-devel; +Cc: gaosong, rail5, qemu-stable
From: rail5 <andrew@rail5.org>
The LDDIR/LDPTE helpers load a page table entry (or huge page entry)
from guest memory and currently apply the PALEN mask to the whole
64-bit value.
That mask is intended to constrain the physical address bits, but masking
the full entry also clears permission bits in the upper part of the PTE,
including NX (bit 62). As a result, LoongArch TCG can incorrectly allow
instruction fetches from NX mappings when translation is driven through
these helpers.
Mask only the PPN/address field and preserve the rest of the PTE.
This was reported as a bug at:
https://gitlab.com/qemu-project/qemu/-/issues/3319
Fixes: 56599a705f2 ("target/loongarch: Introduce loongarch_palen_mask()")
Cc: qemu-stable@nongnu.org
Signed-off-by: rail5 (Andrew S. Rightenburg) <andrew@rail5.org>
---
target/loongarch/tcg/tlb_helper.c | 29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index c1dc77a8f8..8747fa2a0f 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -686,6 +686,24 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
cpu_loop_exit_restore(cs, retaddr);
}
+static inline uint64_t loongarch_mask_pte_ppn(CPULoongArchState *env,
+ uint64_t pte)
+{
+ uint64_t palen_mask = loongarch_palen_mask(env);
+
+ if (is_la64(env)) {
+ uint64_t ppn_bits = pte & MAKE_64BIT_MASK(12, 36);
+ uint64_t ppn_masked = ppn_bits & palen_mask;
+
+ return (pte & ~MAKE_64BIT_MASK(12, 36)) | ppn_masked;
+ } else {
+ uint64_t ppn_bits = pte & MAKE_64BIT_MASK(8, 24);
+ uint64_t ppn_masked = ppn_bits & palen_mask;
+
+ return (pte & ~MAKE_64BIT_MASK(8, 24)) | ppn_masked;
+ }
+}
+
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
uint32_t level, uint32_t mem_idx)
{
@@ -721,7 +739,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
get_dir_base_width(env, &dir_base, &dir_width, level);
index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
phys = base | index << 3;
- return ldq_le_phys(cs->as, phys) & palen_mask;
+ return loongarch_mask_pte_ppn(env, ldq_le_phys(cs->as, phys));
}
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
@@ -729,6 +747,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
{
CPUState *cs = env_cpu(env);
hwaddr phys, tmp0, ptindex, ptoffset0, ptoffset1;
+ uint64_t pte_raw;
uint64_t badv;
uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
@@ -744,7 +763,6 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
* and the other is the huge page entry,
* whose bit 6 should be 1.
*/
- base = base & palen_mask;
if (FIELD_EX64(base, TLBENTRY, HUGE)) {
/*
* Gets the huge page level and Gets huge page size.
@@ -768,7 +786,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
* when loaded into the tlb,
* so the tlb page size needs to be divided by 2.
*/
- tmp0 = base;
+ tmp0 = loongarch_mask_pte_ppn(env, base);
if (odd) {
tmp0 += MAKE_64BIT_MASK(ps, 1);
}
@@ -780,12 +798,15 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
} else {
badv = env->CSR_TLBRBADV;
+ base = base & palen_mask;
+
ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
ptindex = ptindex & ~0x1; /* clear bit 0 */
ptoffset0 = ptindex << 3;
ptoffset1 = (ptindex + 1) << 3;
phys = base | (odd ? ptoffset1 : ptoffset0);
- tmp0 = ldq_le_phys(cs->as, phys) & palen_mask;
+ pte_raw = ldq_le_phys(cs->as, phys);
+ tmp0 = loongarch_mask_pte_ppn(env, pte_raw);
ps = ptbase;
}
--
2.47.3
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/2] target/loongarch: Avoid recursive PNX exception on CSR_BADI fetch
2026-03-05 13:53 [PATCH 0/2] target/loongarch: Fix NX enforcement for PTW helpers Andrew S. Rightenburg via qemu development
2026-03-05 13:54 ` [PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE Andrew S. Rightenburg via qemu development
@ 2026-03-05 13:54 ` Andrew S. Rightenburg via qemu development
2026-03-06 3:57 ` Bibo Mao
1 sibling, 1 reply; 5+ messages in thread
From: Andrew S. Rightenburg via qemu development @ 2026-03-05 13:54 UTC (permalink / raw)
To: qemu-devel; +Cc: gaosong, rail5, qemu-stable
From: rail5 <andrew@rail5.org>
loongarch_cpu_do_interrupt() updates CSR_BADI by fetching the faulting
instruction with cpu_ldl_code_mmu().
For a PNX exception (instruction fetch prohibited by NX), fetching the
instruction at env->pc will fault with PNX again. This can lead to an
infinite exception loop.
Treat PNX like other instruction-fetch exceptions (PIF/ADEF) and do not
update CSR_BADI for it.
Fixes: 410dfbf620a ("target/loongarch: Move TCG specified functions to tcg_cpu.c")
Cc: qemu-stable@nongnu.org
Signed-off-by: rail5 (Andrew S. Rightenburg) <andrew@rail5.org>
---
target/loongarch/tcg/tcg_cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/loongarch/tcg/tcg_cpu.c b/target/loongarch/tcg/tcg_cpu.c
index af92277669..31d3db6e8e 100644
--- a/target/loongarch/tcg/tcg_cpu.c
+++ b/target/loongarch/tcg/tcg_cpu.c
@@ -109,6 +109,7 @@ static void loongarch_cpu_do_interrupt(CPUState *cs)
}
QEMU_FALLTHROUGH;
case EXCCODE_PIF:
+ case EXCCODE_PNX:
case EXCCODE_ADEF:
cause = cs->exception_index;
update_badinstr = 0;
@@ -129,7 +130,6 @@ static void loongarch_cpu_do_interrupt(CPUState *cs)
case EXCCODE_PIS:
case EXCCODE_PME:
case EXCCODE_PNR:
- case EXCCODE_PNX:
case EXCCODE_PPI:
cause = cs->exception_index;
break;
--
2.47.3
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH 2/2] target/loongarch: Avoid recursive PNX exception on CSR_BADI fetch
2026-03-05 13:54 ` [PATCH 2/2] target/loongarch: Avoid recursive PNX exception on CSR_BADI fetch Andrew S. Rightenburg via qemu development
@ 2026-03-06 3:57 ` Bibo Mao
0 siblings, 0 replies; 5+ messages in thread
From: Bibo Mao @ 2026-03-06 3:57 UTC (permalink / raw)
To: Andrew S. Rightenburg, qemu-devel; +Cc: gaosong, qemu-stable
On 2026/3/5 下午9:54, Andrew S. Rightenburg via qemu development wrote:
> From: rail5 <andrew@rail5.org>
>
> loongarch_cpu_do_interrupt() updates CSR_BADI by fetching the faulting
> instruction with cpu_ldl_code_mmu().
>
> For a PNX exception (instruction fetch prohibited by NX), fetching the
> instruction at env->pc will fault with PNX again. This can lead to an
> infinite exception loop.
>
> Treat PNX like other instruction-fetch exceptions (PIF/ADEF) and do not
> update CSR_BADI for it.
>
> Fixes: 410dfbf620a ("target/loongarch: Move TCG specified functions to tcg_cpu.c")
> Cc: qemu-stable@nongnu.org
> Signed-off-by: rail5 (Andrew S. Rightenburg) <andrew@rail5.org>
> ---
> target/loongarch/tcg/tcg_cpu.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/target/loongarch/tcg/tcg_cpu.c b/target/loongarch/tcg/tcg_cpu.c
> index af92277669..31d3db6e8e 100644
> --- a/target/loongarch/tcg/tcg_cpu.c
> +++ b/target/loongarch/tcg/tcg_cpu.c
> @@ -109,6 +109,7 @@ static void loongarch_cpu_do_interrupt(CPUState *cs)
> }
> QEMU_FALLTHROUGH;
> case EXCCODE_PIF:
> + case EXCCODE_PNX:
> case EXCCODE_ADEF:
> cause = cs->exception_index;
> update_badinstr = 0;
> @@ -129,7 +130,6 @@ static void loongarch_cpu_do_interrupt(CPUState *cs)
> case EXCCODE_PIS:
> case EXCCODE_PME:
> case EXCCODE_PNR:
> - case EXCCODE_PNX:
> case EXCCODE_PPI:
> cause = cs->exception_index;
> break;
>
Reviewed-by: Bibo Mao <maobibo@loongson.cn>
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE
2026-03-05 13:54 ` [PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE Andrew S. Rightenburg via qemu development
@ 2026-03-06 4:05 ` Bibo Mao
0 siblings, 0 replies; 5+ messages in thread
From: Bibo Mao @ 2026-03-06 4:05 UTC (permalink / raw)
To: Andrew S. Rightenburg, qemu-devel; +Cc: gaosong, qemu-stable
On 2026/3/5 下午9:54, Andrew S. Rightenburg via qemu development wrote:
> From: rail5 <andrew@rail5.org>
>
> The LDDIR/LDPTE helpers load a page table entry (or huge page entry)
> from guest memory and currently apply the PALEN mask to the whole
> 64-bit value.
>
> That mask is intended to constrain the physical address bits, but masking
> the full entry also clears permission bits in the upper part of the PTE,
> including NX (bit 62). As a result, LoongArch TCG can incorrectly allow
> instruction fetches from NX mappings when translation is driven through
> these helpers.
Good catch.
Yes, it is actually a problem: the upper HW PTE bits are lost.
>
> Mask only the PPN/address field and preserve the rest of the PTE.
>
> This was reported as a bug at:
> https://gitlab.com/qemu-project/qemu/-/issues/3319
>
> Fixes: 56599a705f2 ("target/loongarch: Introduce loongarch_palen_mask()")
> Cc: qemu-stable@nongnu.org
> Signed-off-by: rail5 (Andrew S. Rightenburg) <andrew@rail5.org>
> ---
> target/loongarch/tcg/tlb_helper.c | 29 +++++++++++++++++++++++++----
> 1 file changed, 25 insertions(+), 4 deletions(-)
>
> diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
> index c1dc77a8f8..8747fa2a0f 100644
> --- a/target/loongarch/tcg/tlb_helper.c
> +++ b/target/loongarch/tcg/tlb_helper.c
> @@ -686,6 +686,24 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> cpu_loop_exit_restore(cs, retaddr);
> }
>
> +static inline uint64_t loongarch_mask_pte_ppn(CPULoongArchState *env,
> + uint64_t pte)
> +{
> + uint64_t palen_mask = loongarch_palen_mask(env);
> +
> + if (is_la64(env)) {
> + uint64_t ppn_bits = pte & MAKE_64BIT_MASK(12, 36);
> + uint64_t ppn_masked = ppn_bits & palen_mask;
> +
> + return (pte & ~MAKE_64BIT_MASK(12, 36)) | ppn_masked;
The value 36 is hard-coded; also, I think the software PTE bits should be cleared.
How about adding a HW PTE mask element to the structure CPULoongArchState?
> + } else {
> + uint64_t ppn_bits = pte & MAKE_64BIT_MASK(8, 24);
> + uint64_t ppn_masked = ppn_bits & palen_mask;
> +
> + return (pte & ~MAKE_64BIT_MASK(8, 24)) | ppn_masked;
> + }
> +}
> +
> target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
> uint32_t level, uint32_t mem_idx)
> {
> @@ -721,7 +739,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
> get_dir_base_width(env, &dir_base, &dir_width, level);
> index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
> phys = base | index << 3;
> - return ldq_le_phys(cs->as, phys) & palen_mask;
> + return loongarch_mask_pte_ppn(env, ldq_le_phys(cs->as, phys));
I think it is not needed for the lddir() API; lddir returns the physical
address of the page table without PTE permissions. Only helper_ldpte() needs
modification.
Regards
Bibo Mao
> }
>
> void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
> @@ -729,6 +747,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
> {
> CPUState *cs = env_cpu(env);
> hwaddr phys, tmp0, ptindex, ptoffset0, ptoffset1;
> + uint64_t pte_raw;
> uint64_t badv;
> uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
> uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
> @@ -744,7 +763,6 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
> * and the other is the huge page entry,
> * whose bit 6 should be 1.
> */
> - base = base & palen_mask;
> if (FIELD_EX64(base, TLBENTRY, HUGE)) {
> /*
> * Gets the huge page level and Gets huge page size.
> @@ -768,7 +786,7 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
> * when loaded into the tlb,
> * so the tlb page size needs to be divided by 2.
> */
> - tmp0 = base;
> + tmp0 = loongarch_mask_pte_ppn(env, base);
> if (odd) {
> tmp0 += MAKE_64BIT_MASK(ps, 1);
> }
> @@ -780,12 +798,15 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
> } else {
> badv = env->CSR_TLBRBADV;
>
> + base = base & palen_mask;
> +
> ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
> ptindex = ptindex & ~0x1; /* clear bit 0 */
> ptoffset0 = ptindex << 3;
> ptoffset1 = (ptindex + 1) << 3;
> phys = base | (odd ? ptoffset1 : ptoffset0);
> - tmp0 = ldq_le_phys(cs->as, phys) & palen_mask;
> + pte_raw = ldq_le_phys(cs->as, phys);
> + tmp0 = loongarch_mask_pte_ppn(env, pte_raw);
> ps = ptbase;
> }
>
>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2026-03-06 4:09 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-05 13:53 [PATCH 0/2] target/loongarch: Fix NX enforcement for PTW helpers Andrew S. Rightenburg via qemu development
2026-03-05 13:54 ` [PATCH 1/2] target/loongarch: Preserve PTE permission bits in LDDIR/LDPTE Andrew S. Rightenburg via qemu development
2026-03-06 4:05 ` Bibo Mao
2026-03-05 13:54 ` [PATCH 2/2] target/loongarch: Avoid recursive PNX exception on CSR_BADI fetch Andrew S. Rightenburg via qemu development
2026-03-06 3:57 ` Bibo Mao
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox