qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] target/xtensa: fix OOB TLB entry access
@ 2023-12-15 12:03 Max Filippov
  2023-12-18 14:58 ` Peter Maydell
  2024-01-18  8:01 ` Michael Tokarev
  0 siblings, 2 replies; 4+ messages in thread
From: Max Filippov @ 2023-12-15 12:03 UTC (permalink / raw)
  To: qemu-devel; +Cc: Max Filippov, qemu-stable

r[id]tlb[01], [iw][id]tlb opcodes use TLB way index passed in a register
by the guest. The host uses 3 bits of the index for ITLB indexing and 4
bits for DTLB, but there are only 7 entries in the ITLB array and 10 in
the DTLB array, so a malicious guest may trigger out-of-bounds access to
these arrays.

Change split_tlb_entry_spec return type to bool to indicate whether the
TLB way passed to it is valid. Change get_tlb_entry to return NULL in
case an invalid TLB way is requested. Add assertion to xtensa_tlb_get_entry that
invalid TLB way is requested. Add assertion to xtensa_tlb_get_entry that
requested TLB way and entry indices are valid. Add checks to the
[rwi]tlb helpers that requested TLB way is valid and return 0 or do
nothing when it's not.

Cc: qemu-stable@nongnu.org
Fixes: b67ea0cd7441 ("target-xtensa: implement memory protection options")
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 target/xtensa/mmu_helper.c | 47 ++++++++++++++++++++++++++++----------
 1 file changed, 35 insertions(+), 12 deletions(-)

diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
index 12552a33470e..2fda4e887cce 100644
--- a/target/xtensa/mmu_helper.c
+++ b/target/xtensa/mmu_helper.c
@@ -224,22 +224,31 @@ static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
  * Split TLB address into TLB way, entry index and VPN (with index).
  * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
  */
-static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
-        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
+static bool split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
+                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
 {
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
         *wi = v & (dtlb ? 0xf : 0x7);
-        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
+        if (*wi < (dtlb ? env->config->dtlb.nways : env->config->itlb.nways)) {
+            split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
+            return true;
+        } else {
+            return false;
+        }
     } else {
         *vpn = v & REGION_PAGE_MASK;
         *wi = 0;
         *ei = (v >> 29) & 0x7;
+        return true;
     }
 }
 
 static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
                                               unsigned wi, unsigned ei)
 {
+    const xtensa_tlb *tlb = dtlb ? &env->config->dtlb : &env->config->itlb;
+
+    assert(wi < tlb->nways && ei < tlb->way_size[wi]);
     return dtlb ?
         env->dtlb[wi] + ei :
         env->itlb[wi] + ei;
@@ -252,11 +261,14 @@ static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
     uint32_t wi;
     uint32_t ei;
 
-    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
-    if (pwi) {
-        *pwi = wi;
+    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
+        if (pwi) {
+            *pwi = wi;
+        }
+        return xtensa_tlb_get_entry(env, dtlb, wi, ei);
+    } else {
+        return NULL;
     }
-    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
 }
 
 static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
@@ -482,7 +494,12 @@ uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
         uint32_t wi;
         const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
-        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
+
+        if (entry) {
+            return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
+        } else {
+            return 0;
+        }
     } else {
         return v & REGION_PAGE_MASK;
     }
@@ -491,7 +508,12 @@ uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
 uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
 {
     const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
-    return entry->paddr | entry->attr;
+
+    if (entry) {
+        return entry->paddr | entry->attr;
+    } else {
+        return 0;
+    }
 }
 
 void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
@@ -499,7 +521,7 @@ void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
         uint32_t wi;
         xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
-        if (entry->variable && entry->asid) {
+        if (entry && entry->variable && entry->asid) {
             tlb_flush_page(env_cpu(env), entry->vaddr);
             entry->asid = 0;
         }
@@ -537,8 +559,9 @@ void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
     uint32_t vpn;
     uint32_t wi;
     uint32_t ei;
-    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
-    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
+    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
+        xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
+    }
 }
 
 /*!
-- 
2.39.2



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH] target/xtensa: fix OOB TLB entry access
  2023-12-15 12:03 [PATCH] target/xtensa: fix OOB TLB entry access Max Filippov
@ 2023-12-18 14:58 ` Peter Maydell
  2024-01-18  8:01 ` Michael Tokarev
  1 sibling, 0 replies; 4+ messages in thread
From: Peter Maydell @ 2023-12-18 14:58 UTC (permalink / raw)
  To: Max Filippov; +Cc: qemu-devel, qemu-stable

On Fri, 15 Dec 2023 at 12:05, Max Filippov <jcmvbkbc@gmail.com> wrote:
>
> r[id]tlb[01], [iw][id]tlb opcodes use TLB way index passed in a register
> by the guest. The host uses 3 bits of the index for ITLB indexing and 4
> bits for DTLB, but there's only 7 entries in the ITLB array and 10 in
> the DTLB array, so a malicious guest may trigger out-of-bound access to
> these arrays.
>
> Change split_tlb_entry_spec return type to bool to indicate whether TLB
> way passed to it is valid. Change get_tlb_entry to return NULL in case
> invalid TLB way is requested. Add assertion to xtensa_tlb_get_entry that
> requested TLB way and entry indices are valid. Add checks to the
> [rwi]tlb helpers that requested TLB way is valid and return 0 or do
> nothing when it's not.
>
> Cc: qemu-stable@nongnu.org
> Fixes: b67ea0cd7441 ("target-xtensa: implement memory protection options")
> Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
> ---

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>

thanks
-- PMM


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] target/xtensa: fix OOB TLB entry access
  2023-12-15 12:03 [PATCH] target/xtensa: fix OOB TLB entry access Max Filippov
  2023-12-18 14:58 ` Peter Maydell
@ 2024-01-18  8:01 ` Michael Tokarev
  2024-01-19 16:44   ` Peter Maydell
  1 sibling, 1 reply; 4+ messages in thread
From: Michael Tokarev @ 2024-01-18  8:01 UTC (permalink / raw)
  To: Max Filippov, qemu-devel; +Cc: qemu-stable

15.12.2023 15:03, Max Filippov :
> r[id]tlb[01], [iw][id]tlb opcodes use TLB way index passed in a register
> by the guest. The host uses 3 bits of the index for ITLB indexing and 4
> bits for DTLB, but there's only 7 entries in the ITLB array and 10 in
> the DTLB array, so a malicious guest may trigger out-of-bound access to
> these arrays.
> 
> Change split_tlb_entry_spec return type to bool to indicate whether TLB
> way passed to it is valid. Change get_tlb_entry to return NULL in case
> invalid TLB way is requested. Add assertion to xtensa_tlb_get_entry that
> requested TLB way and entry indices are valid. Add checks to the
> [rwi]tlb helpers that requested TLB way is valid and return 0 or do
> nothing when it's not.
> 
> Cc: qemu-stable@nongnu.org
> Fixes: b67ea0cd7441 ("target-xtensa: implement memory protection options")
> Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

Ping?
Can we get this to master before Jan-27? :)

Thanks,

/mjt


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] target/xtensa: fix OOB TLB entry access
  2024-01-18  8:01 ` Michael Tokarev
@ 2024-01-19 16:44   ` Peter Maydell
  0 siblings, 0 replies; 4+ messages in thread
From: Peter Maydell @ 2024-01-19 16:44 UTC (permalink / raw)
  To: Michael Tokarev; +Cc: Max Filippov, qemu-devel, qemu-stable

On Thu, 18 Jan 2024 at 08:01, Michael Tokarev <mjt@tls.msk.ru> wrote:
>
> 15.12.2023 15:03, Max Filippov :
> > r[id]tlb[01], [iw][id]tlb opcodes use TLB way index passed in a register
> > by the guest. The host uses 3 bits of the index for ITLB indexing and 4
> > bits for DTLB, but there's only 7 entries in the ITLB array and 10 in
> > the DTLB array, so a malicious guest may trigger out-of-bound access to
> > these arrays.
> >
> > Change split_tlb_entry_spec return type to bool to indicate whether TLB
> > way passed to it is valid. Change get_tlb_entry to return NULL in case
> > invalid TLB way is requested. Add assertion to xtensa_tlb_get_entry that
> > requested TLB way and entry indices are valid. Add checks to the
> > [rwi]tlb helpers that requested TLB way is valid and return 0 or do
> > nothing when it's not.
> >
> > Cc: qemu-stable@nongnu.org
> > Fixes: b67ea0cd7441 ("target-xtensa: implement memory protection options")
> > Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
>
> Ping?
> Can we get this to master before Jan-27? :)

I can take it via target-arm.next, I guess.

-- PMM


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2024-01-19 16:45 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-12-15 12:03 [PATCH] target/xtensa: fix OOB TLB entry access Max Filippov
2023-12-18 14:58 ` Peter Maydell
2024-01-18  8:01 ` Michael Tokarev
2024-01-19 16:44   ` Peter Maydell

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).