From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH 19/23] accel/tcg: Always use IntervalTree for code lookups
Date: Wed, 9 Oct 2024 17:35:05 -0700
Message-ID: <c81af2ad-fe0d-4f9f-8bc2-23bcd49354ea@linaro.org>
In-Reply-To: <20241009150855.804605-20-richard.henderson@linaro.org>
On 10/9/24 08:08, Richard Henderson wrote:
> Because translation is special, we don't need the speed
> of the direct-mapped softmmu tlb. We cache a lookup in
> DisasContextBase within the translator loop anyway.
>
> Drop the addr_code comparator from CPUTLBEntry.
> Go directly to the IntervalTree for MMU_INST_FETCH.
> Derive exec flags from read flags.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> include/exec/cpu-all.h | 3 +
> include/exec/tlb-common.h | 5 +-
> accel/tcg/cputlb.c | 138 +++++++++++++++++++++++++++++---------
> 3 files changed, 110 insertions(+), 36 deletions(-)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 6f09b86e7f..7f5a10962a 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -326,6 +326,9 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
> (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
> | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
>
> +/* Filter read flags to exec flags. */
> +#define TLB_EXEC_FLAGS_MASK (TLB_MMIO)
> +
> /*
> * Flags stored in CPUTLBEntryFull.slow_flags[x].
> * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
> diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
> index 300f9fae67..feaa471299 100644
> --- a/include/exec/tlb-common.h
> +++ b/include/exec/tlb-common.h
> @@ -26,7 +26,6 @@ typedef union CPUTLBEntry {
> struct {
> uint64_t addr_read;
> uint64_t addr_write;
> - uint64_t addr_code;
> /*
> * Addend to virtual address to get host address. IO accesses
> * use the corresponding iotlb value.
> @@ -35,7 +34,7 @@ typedef union CPUTLBEntry {
> };
> /*
> * Padding to get a power of two size, as well as index
> - * access to addr_{read,write,code}.
> + * access to addr_{read,write}.
> */
> uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
> } CPUTLBEntry;
> @@ -92,7 +91,7 @@ struct CPUTLBEntryFull {
> * Additional tlb flags for use by the slow path. If non-zero,
> * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
> */
> - uint8_t slow_flags[MMU_ACCESS_COUNT];
> + uint8_t slow_flags[2];
>
> /*
> * Allow target-specific additions to this structure.
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 61daa89e06..7c8308355d 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -114,8 +114,9 @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
> MMU_DATA_LOAD * sizeof(uint64_t));
> QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
> MMU_DATA_STORE * sizeof(uint64_t));
> - QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
> - MMU_INST_FETCH * sizeof(uint64_t));
> +
> + tcg_debug_assert(access_type == MMU_DATA_LOAD ||
> + access_type == MMU_DATA_STORE);
>
> #if TARGET_LONG_BITS == 32
> /* Use qatomic_read, in case of addr_write; only care about low bits. */
> @@ -490,8 +491,7 @@ static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
> mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
>
> return (page == (tlb_entry->addr_read & mask) ||
> - page == (tlb_addr_write(tlb_entry) & mask) ||
> - page == (tlb_entry->addr_code & mask));
> + page == (tlb_addr_write(tlb_entry) & mask));
> }
>
> static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
> @@ -1061,15 +1061,13 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
> vaddr address, int flags,
> MMUAccessType access_type, bool enable)
> {
> - if (enable) {
> - address |= flags & TLB_FLAGS_MASK;
> - flags &= TLB_SLOW_FLAGS_MASK;
> - if (flags) {
> - address |= TLB_FORCE_SLOW;
> - }
> - } else {
> - address = -1;
> - flags = 0;
> + if (!enable) {
> + address = TLB_INVALID_MASK;
> + }
> + address |= flags & TLB_FLAGS_MASK;
> + flags &= TLB_SLOW_FLAGS_MASK;
> + if (flags) {
> + address |= TLB_FORCE_SLOW;
> }
I'm not sure I follow this change correctly.
After the change, the final address and flags values depend only on the
flags parameter, whereas before they depended on both the flags and
enable parameters.
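
To make that concrete, here is a standalone sketch of the two versions
reduced to how they treat a disabled entry. The mask values are
placeholders I made up for the sketch, not the real definitions from
exec/cpu-all.h; only the control flow is meant to match the patch:

/*
 * Sketch of the old and new tlb_set_compare() logic for the
 * enable == false case. Placeholder masks, not QEMU's values.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_INVALID_MASK    (1 << 0)   /* placeholder value */
#define TLB_MMIO            (1 << 1)   /* placeholder value */
#define TLB_WATCHPOINT      (1 << 2)   /* placeholder value */
#define TLB_FORCE_SLOW      (1 << 3)   /* placeholder value */
#define TLB_FLAGS_MASK      (TLB_INVALID_MASK | TLB_MMIO)
#define TLB_SLOW_FLAGS_MASK TLB_WATCHPOINT

/* Old behavior: a disabled entry discards the incoming flags. */
static void set_compare_before(uint64_t address, int flags, bool enable,
                               uint64_t *cmp, int *slow)
{
    if (enable) {
        address |= flags & TLB_FLAGS_MASK;
        flags &= TLB_SLOW_FLAGS_MASK;
        if (flags) {
            address |= TLB_FORCE_SLOW;
        }
    } else {
        address = -1;   /* comparator can never match */
        flags = 0;
    }
    *cmp = address;
    *slow = flags;
}

/* New behavior: the flags are merged even when the entry is disabled. */
static void set_compare_after(uint64_t address, int flags, bool enable,
                              uint64_t *cmp, int *slow)
{
    if (!enable) {
        address = TLB_INVALID_MASK;
    }
    address |= flags & TLB_FLAGS_MASK;
    flags &= TLB_SLOW_FLAGS_MASK;
    if (flags) {
        address |= TLB_FORCE_SLOW;
    }
    *cmp = address;
    *slow = flags;
}

int main(void)
{
    uint64_t cmp_b, cmp_a;
    int slow_b, slow_a;

    /* A disabled entry whose page carries MMIO and watchpoint flags. */
    set_compare_before(0x1000, TLB_MMIO | TLB_WATCHPOINT, false,
                       &cmp_b, &slow_b);
    set_compare_after(0x1000, TLB_MMIO | TLB_WATCHPOINT, false,
                      &cmp_a, &slow_a);
    printf("before: cmp=%#" PRIx64 " slow=%#x\n", cmp_b, slow_b);
    printf("after:  cmp=%#" PRIx64 " slow=%#x\n", cmp_a, slow_a);
    return 0;
}

With the old code a disabled entry comes out as cmp == -1 / slow == 0;
with the new code the MMIO and watchpoint bits survive in the comparator
and slow_flags. If that is intentional (the entry still cannot match,
since TLB_INVALID_MASK is set), it would be worth a comment.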
> ent->addr_idx[access_type] = address;
> full->slow_flags[access_type] = flags;
> @@ -1215,9 +1213,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
> /* Now calculate the new entry */
> node->copy.addend = addend - addr_page;
>
> - tlb_set_compare(full, &node->copy, addr_page, read_flags,
> - MMU_INST_FETCH, prot & PAGE_EXEC);
> -
> if (wp_flags & BP_MEM_READ) {
> read_flags |= TLB_WATCHPOINT;
> }
> @@ -1392,21 +1387,52 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
> }
> }
>
> -static int probe_access_internal(CPUState *cpu, vaddr addr,
> - int fault_size, MMUAccessType access_type,
> - int mmu_idx, bool nonfault,
> - void **phost, CPUTLBEntryFull **pfull,
> - uintptr_t retaddr, bool check_mem_cbs)
> +static int probe_access_internal_code(CPUState *cpu, vaddr addr,
> + int fault_size, int mmu_idx,
> + bool nonfault,
> + void **phost, CPUTLBEntryFull **pfull,
> + uintptr_t retaddr)
> +{
> + CPUTLBEntryTree *t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
> + int flags;
> +
> + if (!t || !(t->full.prot & PAGE_EXEC)) {
> + if (!tlb_fill_align(cpu, addr, MMU_INST_FETCH, mmu_idx,
> + 0, fault_size, nonfault, retaddr)) {
> + /* Non-faulting page table read failed. */
> + *phost = NULL;
> + *pfull = NULL;
> + return TLB_INVALID_MASK;
> + }
> + t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
> + }
> + flags = t->copy.addr_read & TLB_EXEC_FLAGS_MASK;
> + *pfull = &t->full;
> +
> + if (flags) {
> + *phost = NULL;
> + return TLB_MMIO;
> + }
> +
> + /* Everything else is RAM. */
> + *phost = (void *)((uintptr_t)addr + t->copy.addend);
> + return flags;
> +}
> +
> +static int probe_access_internal_data(CPUState *cpu, vaddr addr,
> + int fault_size, MMUAccessType access_type,
> + int mmu_idx, bool nonfault,
> + void **phost, CPUTLBEntryFull **pfull,
> + uintptr_t retaddr, bool check_mem_cbs)
> {
> uintptr_t index = tlb_index(cpu, mmu_idx, addr);
> CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
> uint64_t tlb_addr = tlb_read_idx(entry, access_type);
> - vaddr page_addr = addr & TARGET_PAGE_MASK;
> int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
> CPUTLBEntryFull *full;
>
> - if (!tlb_hit_page(tlb_addr, page_addr)) {
> - if (!tlbtree_hit(cpu, mmu_idx, access_type, page_addr)) {
> + if (!tlb_hit(tlb_addr, addr)) {
> + if (!tlbtree_hit(cpu, mmu_idx, access_type, addr)) {
> if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
> 0, fault_size, nonfault, retaddr)) {
> /* Non-faulting page table read failed. */
> @@ -1450,6 +1476,21 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
> return flags;
> }
>
> +static int probe_access_internal(CPUState *cpu, vaddr addr,
> + int fault_size, MMUAccessType access_type,
> + int mmu_idx, bool nonfault,
> + void **phost, CPUTLBEntryFull **pfull,
> + uintptr_t retaddr, bool check_mem_cbs)
> +{
> + if (access_type == MMU_INST_FETCH) {
> + return probe_access_internal_code(cpu, addr, fault_size, mmu_idx,
> + nonfault, phost, pfull, retaddr);
> + }
> + return probe_access_internal_data(cpu, addr, fault_size, access_type,
> + mmu_idx, nonfault, phost, pfull,
> + retaddr, check_mem_cbs);
> +}
> +
> int probe_access_full(CPUArchState *env, vaddr addr, int size,
> MMUAccessType access_type, int mmu_idx,
> bool nonfault, void **phost, CPUTLBEntryFull **pfull,
> @@ -1582,9 +1623,9 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
> CPUTLBEntryFull *full;
> void *p;
>
> - (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
> - cpu_mmu_index(env_cpu(env), true), false,
> - &p, &full, 0, false);
> + (void)probe_access_internal_code(env_cpu(env), addr, 1,
> + cpu_mmu_index(env_cpu(env), true),
> + false, &p, &full, 0);
> if (p == NULL) {
> return -1;
> }
> @@ -1678,8 +1719,31 @@ typedef struct MMULookupLocals {
> * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
> * @mmu_idx may have resized.
> */
> -static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
> - int mmu_idx, MMUAccessType access_type, uintptr_t ra)
> +static bool mmu_lookup1_code(CPUState *cpu, MMULookupPageData *data,
> + MemOp memop, int mmu_idx, uintptr_t ra)
> +{
> + vaddr addr = data->addr;
> + CPUTLBEntryTree *t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
> + bool maybe_resized = true;
> +
> + if (!t || !(t->full.prot & PAGE_EXEC)) {
> + tlb_fill_align(cpu, addr, MMU_INST_FETCH, mmu_idx,
> + memop, data->size, false, ra);
> + maybe_resized = true;
> + t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
> + }
> +
> + data->full = &t->full;
> + data->flags = t->copy.addr_read & TLB_EXEC_FLAGS_MASK;
> + /* Compute haddr speculatively; depending on flags it might be invalid. */
> + data->haddr = (void *)((uintptr_t)addr + t->copy.addend);
> +
> + return maybe_resized;
> +}
> +
> +static bool mmu_lookup1_data(CPUState *cpu, MMULookupPageData *data,
> + MemOp memop, int mmu_idx,
> + MMUAccessType access_type, uintptr_t ra)
> {
> vaddr addr = data->addr;
> uintptr_t index = tlb_index(cpu, mmu_idx, addr);
> @@ -1738,6 +1802,15 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
> return maybe_resized;
> }
>
> +static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
> + int mmu_idx, MMUAccessType access_type, uintptr_t ra)
> +{
> + if (access_type == MMU_INST_FETCH) {
> + return mmu_lookup1_code(cpu, data, memop, mmu_idx, ra);
> + }
> + return mmu_lookup1_data(cpu, data, memop, mmu_idx, access_type, ra);
> +}
> +
> /**
> * mmu_watch_or_dirty
> * @cpu: generic cpu state
> @@ -1885,13 +1958,13 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
> }
> }
>
> + full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
> +
> /*
> * Let the guest notice RMW on a write-only page.
> * We have just verified that the page is writable.
> - * Subpage lookups may have left TLB_INVALID_MASK set,
> - * but addr_read will only be -1 if PAGE_READ was unset.
> */
> - if (unlikely(tlbe->addr_read == -1)) {
> + if (unlikely(!(full->prot & PAGE_READ))) {
> tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
> 0, size, false, retaddr);
> /*
> @@ -1929,7 +2002,6 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
> }
>
> hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
> - full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
>
> if (unlikely(flags & TLB_NOTDIRTY)) {
> notdirty_write(cpu, addr, size, full, retaddr);
Having a fast path for code fetches sounds good. Did you measure the
benefit, or did you implement it on the assumption that it would be
worthwhile?
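
Even a toy micro-benchmark would help quantify it. As a sketch of the
comparison I have in mind (none of this is QEMU code, a binary search
over sorted pages stands in for the IntervalTree walk, and only the
ratio between the two loops is meaningful):

/*
 * Toy micro-benchmark: direct-mapped table probe vs. a binary search
 * over sorted page addresses, as a rough stand-in for the tree walk.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TLB_BITS  8
#define TLB_SIZE  (1 << TLB_BITS)
#define N_PAGES   4096
#define ITERS     (1 << 24)

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
    static uint64_t tlb[TLB_SIZE];   /* direct-mapped: one tag per slot */
    static uint64_t pages[N_PAGES];  /* sorted page addresses */
    uint64_t sum = 0, t0, t1, t2;
    int i;

    for (i = 0; i < TLB_SIZE; i++) {
        tlb[i] = (uint64_t)i << 12;
    }
    for (i = 0; i < N_PAGES; i++) {
        pages[i] = (uint64_t)i << 12;
    }

    t0 = now_ns();
    for (i = 0; i < ITERS; i++) {
        uint64_t addr = (uint64_t)(i % N_PAGES) << 12;
        sum += tlb[(addr >> 12) & (TLB_SIZE - 1)];  /* one masked index */
    }
    t1 = now_ns();
    for (i = 0; i < ITERS; i++) {
        uint64_t addr = (uint64_t)(i % N_PAGES) << 12;
        int lo = 0, hi = N_PAGES - 1;
        while (lo < hi) {                /* ~log2(N_PAGES) probes */
            int mid = (lo + hi) / 2;
            if (pages[mid] < addr) {
                lo = mid + 1;
            } else {
                hi = mid;
            }
        }
        sum += pages[lo];
    }
    t2 = now_ns();

    printf("direct-mapped: %.2f ns/lookup\n", (double)(t1 - t0) / ITERS);
    printf("tree-ish:      %.2f ns/lookup\n", (double)(t2 - t1) / ITERS);
    printf("(checksum %llu)\n", (unsigned long long)sum);
    return 0;
}

Of course the real question is whether the per-fetch cost even matters,
given that the translator loop caches the lookup in DisasContextBase.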