From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH v2 31/54] accel/tcg: Always use IntervalTree for code lookups
Date: Thu, 14 Nov 2024 10:32:52 -0800
Message-ID: <9bb054fd-40c6-4c44-aa02-52deba8bbc51@linaro.org>
In-Reply-To: <20241114160131.48616-32-richard.henderson@linaro.org>

On 11/14/24 08:01, Richard Henderson wrote:
> Because translation is special, we don't need the speed
> of the direct-mapped softmmu tlb.  We cache a lookup in
> DisasContextBase within the translator loop anyway.
> 
> Drop the addr_code comparator from CPUTLBEntry.
> Go directly to the IntervalTree for MMU_INST_FETCH.
> Derive exec flags from read flags.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   include/exec/cpu-all.h    |  3 ++
>   include/exec/tlb-common.h |  5 ++-
>   accel/tcg/cputlb.c        | 76 ++++++++++++++++++++++++---------------
>   3 files changed, 52 insertions(+), 32 deletions(-)
> 
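
For readers skimming the series: the reason the slower IntervalTree
path is acceptable for instruction fetch is that the translator loop
only pays for it roughly once per guest code page; later fetches hit
the host address cached in DisasContextBase. A toy sketch of that
caching pattern, with made-up names and a fixed 4k page size (not the
actual QEMU code):

  #include <stdint.h>

  struct code_page_cache {
      uint64_t page;    /* guest code page currently cached */
      void *host;       /* its host address, or NULL if none yet */
  };

  /* slow_lookup stands in for the IntervalTree-backed tlb_lookup. */
  static void *fetch_host(struct code_page_cache *c, uint64_t pc,
                          void *(*slow_lookup)(uint64_t page))
  {
      uint64_t page = pc & ~(uint64_t)0xfff;    /* assume 4k pages */

      if (c->host == NULL || c->page != page) {
          c->host = slow_lookup(page);          /* at most once per page */
          c->page = page;
      }
      return (char *)c->host + (pc & 0xfff);
  }
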
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 45e6676938..ad160c328a 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -339,6 +339,9 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
>       (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
>       | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
>   
> +/* Filter read flags to exec flags. */
> +#define TLB_EXEC_FLAGS_MASK  (TLB_MMIO)
> +
>   /*
>    * Flags stored in CPUTLBEntryFull.slow_flags[x].
>    * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
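
This mask makes the "derive exec flags from read flags" line of the
commit message concrete: of everything a read comparator can carry,
only TLB_MMIO is meaningful for instruction fetch. A self-contained
illustration of the filtering, using demo constants rather than the
real QEMU flag values:

  #include <stdint.h>
  #include <stdio.h>

  #define DEMO_TLB_MMIO        (1u << 0)  /* stands in for TLB_MMIO */
  #define DEMO_TLB_WATCHPOINT  (1u << 1)  /* read-side only; filtered out */
  #define DEMO_EXEC_FLAGS_MASK DEMO_TLB_MMIO

  static uint32_t exec_flags_from_read(uint32_t read_flags)
  {
      /* Only the MMIO property survives into the exec flags. */
      return read_flags & DEMO_EXEC_FLAGS_MASK;
  }

  int main(void)
  {
      uint32_t rf = DEMO_TLB_MMIO | DEMO_TLB_WATCHPOINT;
      printf("exec flags: %#x\n", exec_flags_from_read(rf));  /* 0x1 */
      return 0;
  }
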
> diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
> index 300f9fae67..feaa471299 100644
> --- a/include/exec/tlb-common.h
> +++ b/include/exec/tlb-common.h
> @@ -26,7 +26,6 @@ typedef union CPUTLBEntry {
>       struct {
>           uint64_t addr_read;
>           uint64_t addr_write;
> -        uint64_t addr_code;
>           /*
>            * Addend to virtual address to get host address.  IO accesses
>            * use the corresponding iotlb value.
> @@ -35,7 +34,7 @@ typedef union CPUTLBEntry {
>       };
>       /*
>        * Padding to get a power of two size, as well as index
> -     * access to addr_{read,write,code}.
> +     * access to addr_{read,write}.
>        */
>       uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
>   } CPUTLBEntry;
> @@ -92,7 +91,7 @@ struct CPUTLBEntryFull {
>        * Additional tlb flags for use by the slow path. If non-zero,
>        * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
>        */
> -    uint8_t slow_flags[MMU_ACCESS_COUNT];
> +    uint8_t slow_flags[2];
>   
>       /*
>        * Allow target-specific additions to this structure.
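
One thing worth spelling out for future readers: shrinking slow_flags
to two entries bakes in the assumption that MMU_DATA_LOAD == 0 and
MMU_DATA_STORE == 1. The build-time checks in tlb_read_idx() already
imply it, but a direct assertion would not hurt. A self-contained C11
sketch of the idea, on a demo enum rather than MMUAccessType itself:

  /* Mirrors the first two members of MMUAccessType. */
  enum demo_access { DEMO_DATA_LOAD, DEMO_DATA_STORE, DEMO_INST_FETCH };

  _Static_assert(DEMO_DATA_LOAD == 0 && DEMO_DATA_STORE == 1,
                 "slow_flags[] is indexed by data access type only");
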
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 981098a6f2..be2ea1bc70 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -114,8 +114,9 @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
>                         MMU_DATA_LOAD * sizeof(uint64_t));
>       QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
>                         MMU_DATA_STORE * sizeof(uint64_t));
> -    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
> -                      MMU_INST_FETCH * sizeof(uint64_t));
> +
> +    tcg_debug_assert(access_type == MMU_DATA_LOAD ||
> +                     access_type == MMU_DATA_STORE);
>   
>   #if TARGET_LONG_BITS == 32
>       /* Use qatomic_read, in case of addr_write; only care about low bits. */
> @@ -480,8 +481,7 @@ static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
>       mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
>   
>       return (page == (tlb_entry->addr_read & mask) ||
> -            page == (tlb_addr_write(tlb_entry) & mask) ||
> -            page == (tlb_entry->addr_code & mask));
> +            page == (tlb_addr_write(tlb_entry) & mask));
>   }
>   
>   /* Called with tlb_c.lock held */
> @@ -1184,9 +1184,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
>       /* Now calculate the new entry */
>       node->copy.addend = addend - addr_page;
>   
> -    tlb_set_compare(full, &node->copy, addr_page, read_flags,
> -                    MMU_INST_FETCH, prot & PAGE_EXEC);
> -
>       if (wp_flags & BP_MEM_READ) {
>           read_flags |= TLB_WATCHPOINT;
>       }
> @@ -1308,22 +1305,30 @@ static bool tlb_lookup(CPUState *cpu, TLBLookupOutput *o,
>       /* Primary lookup in the fast tlb. */
>       entry = tlbfast_entry(fast, addr);
>       full = &desc->fulltlb[tlbfast_index(fast, addr)];
> -    cmp = tlb_read_idx(entry, access_type);
> -    if (tlb_hit(cmp, addr)) {
> -        goto found;
> +    if (access_type != MMU_INST_FETCH) {
> +        cmp = tlb_read_idx(entry, access_type);
> +        if (tlb_hit(cmp, addr)) {
> +            goto found_data;
> +        }
>       }
>   
>       /* Secondary lookup in the IntervalTree. */
>       node = tlbtree_lookup_addr(desc, addr);
>       if (node) {
> -        cmp = tlb_read_idx(&node->copy, access_type);
> -        if (tlb_hit(cmp, addr)) {
> -            /* Install the cached entry. */
> -            qemu_spin_lock(&cpu->neg.tlb.c.lock);
> -            copy_tlb_helper_locked(entry, &node->copy);
> -            qemu_spin_unlock(&cpu->neg.tlb.c.lock);
> -            *full = node->full;
> -            goto found;
> +        if (access_type == MMU_INST_FETCH) {
> +            if (node->full.prot & PAGE_EXEC) {
> +                goto found_code;
> +            }
> +        } else {
> +            cmp = tlb_read_idx(&node->copy, access_type);
> +            if (tlb_hit(cmp, addr)) {
> +                /* Install the cached entry. */
> +                qemu_spin_lock(&cpu->neg.tlb.c.lock);
> +                copy_tlb_helper_locked(entry, &node->copy);
> +                qemu_spin_unlock(&cpu->neg.tlb.c.lock);
> +                *full = node->full;
> +                goto found_data;
> +            }
>           }
>       }
>   
> @@ -1333,9 +1338,14 @@ static bool tlb_lookup(CPUState *cpu, TLBLookupOutput *o,
>           tcg_debug_assert(probe);
>           return false;
>       }
> -
>       o->did_tlb_fill = true;
>   
> +    if (access_type == MMU_INST_FETCH) {
> +        node = tlbtree_lookup_addr(desc, addr);
> +        tcg_debug_assert(node);
> +        goto found_code;
> +    }
> +
>       entry = tlbfast_entry(fast, addr);
>       full = &desc->fulltlb[tlbfast_index(fast, addr)];
>       cmp = tlb_read_idx(entry, access_type);
> @@ -1345,14 +1355,29 @@ static bool tlb_lookup(CPUState *cpu, TLBLookupOutput *o,
>        * called tlb_fill_align, so we know that this entry *is* valid.
>        */
>       flags &= ~TLB_INVALID_MASK;
> +    goto found_data;
> +
> + found_data:
> +    flags &= cmp;
> +    flags |= full->slow_flags[access_type];
> +    o->flags = flags;
> +    o->full = *full;
> +    o->haddr = (void *)((uintptr_t)addr + entry->addend);
>       goto done;
>   
> - found:
> -    /* Alignment has not been checked by tlb_fill_align. */
> -    {
> + found_code:
> +    o->flags = node->copy.addr_read & TLB_EXEC_FLAGS_MASK;
> +    o->full = node->full;
> +    o->haddr = (void *)((uintptr_t)addr + node->copy.addend);
> +    goto done;
> +
> + done:
> +    if (!o->did_tlb_fill) {
>           int a_bits = memop_alignment_bits(memop);
>   
>           /*
> +         * Alignment has not been checked by tlb_fill_align.
> +         *
>            * The TLB_CHECK_ALIGNED check differs from the normal alignment
>            * check, in that this is based on the atomicity of the operation.
>            * The intended use case is the ARM memory type field of each PTE,
> @@ -1366,13 +1391,6 @@ static bool tlb_lookup(CPUState *cpu, TLBLookupOutput *o,
>               cpu_unaligned_access(cpu, addr, access_type, i->mmu_idx, i->ra);
>           }
>       }
> -
> - done:
> -    flags &= cmp;
> -    flags |= full->slow_flags[access_type];
> -    o->flags = flags;
> -    o->full = *full;
> -    o->haddr = (void *)((uintptr_t)addr + entry->addend);
>       return true;
>   }
>   
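
To summarize the control flow after this patch, as I read it: data
accesses try the fast tlb first and fall back to the tree (re-installing
the entry on a tree hit); instruction fetches go straight to the tree
and only need PAGE_EXEC plus the filtered MMIO bit; a miss on both
falls through to tlb_fill_align() and a retry. A compilable toy
paraphrase, with stub types and a fixed 4k page size (none of these
names are the real QEMU ones):

  #include <stdbool.h>
  #include <stdint.h>

  enum demo_access { DEMO_LOAD, DEMO_STORE, DEMO_FETCH };

  #define DEMO_PAGE_EXEC 4
  #define DEMO_PAGE_MASK (~(uint64_t)0xfff)

  struct demo_fast { uint64_t addr[2]; intptr_t addend; bool valid; };
  struct demo_node { uint64_t first, last; int prot; intptr_t addend;
                     bool present; };

  static struct demo_fast fast;   /* the direct-mapped slot for this addr */
  static struct demo_node node;   /* result of a tlbtree_lookup_addr() */

  static bool demo_lookup(uint64_t addr, enum demo_access at, void **haddr)
  {
      /* Primary lookup: data accesses only; fetches skip the fast tlb. */
      if (at != DEMO_FETCH && fast.valid
          && fast.addr[at] == (addr & DEMO_PAGE_MASK)) {
          goto found_data;
      }

      /* Secondary lookup: interval containment stands in for tlb_hit(). */
      if (node.present && addr >= node.first && addr <= node.last) {
          if (at == DEMO_FETCH) {
              if (node.prot & DEMO_PAGE_EXEC) {
                  goto found_code;
              }
          } else {
              /* Re-install the cached entry into the fast tlb. */
              fast.addr[at] = addr & DEMO_PAGE_MASK;
              fast.addend = node.addend;
              fast.valid = true;
              goto found_data;
          }
      }

      /* Miss: the real code calls tlb_fill_align() here and retries. */
      return false;

   found_data:
      *haddr = (void *)(uintptr_t)(addr + fast.addend);
      return true;

   found_code:
      *haddr = (void *)(uintptr_t)(addr + node.addend);
      return true;
  }
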

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>


