From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH v2 18/54] accel/tcg: Remove the victim tlb
Date: Thu, 14 Nov 2024 10:07:26 -0800
Message-ID: <77371ae7-f123-4577-b44b-44ad7d9d19cd@linaro.org>
In-Reply-To: <20241114160131.48616-19-richard.henderson@linaro.org>
On 11/14/24 08:00, Richard Henderson wrote:
> This has been functionally replaced by the IntervalTree.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  include/hw/core/cpu.h |  8 -----
>  accel/tcg/cputlb.c    | 74 -------------------------------------------
>  2 files changed, 82 deletions(-)
>
> diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
> index 1ebc999a73..8eda0574b2 100644
> --- a/include/hw/core/cpu.h
> +++ b/include/hw/core/cpu.h
> @@ -201,9 +201,6 @@ struct CPUClass {
>   */
>  #define NB_MMU_MODES 16
>
> -/* Use a fully associative victim tlb of 8 entries. */
> -#define CPU_VTLB_SIZE 8
> -
>  /*
>   * The full TLB entry, which is not accessed by generated TCG code,
>   * so the layout is not as critical as that of CPUTLBEntry. This is
> @@ -285,11 +282,6 @@ typedef struct CPUTLBDesc {
>      /* maximum number of entries observed in the window */
>      size_t window_max_entries;
>      size_t n_used_entries;
> -    /* The next index to use in the tlb victim table. */
> -    size_t vindex;
> -    /* The tlb victim table, in two parts. */
> -    CPUTLBEntry vtable[CPU_VTLB_SIZE];
> -    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
>      CPUTLBEntryFull *fulltlb;
>      /* All active tlb entries for this address space. */
>      IntervalTreeRoot iroot;
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index ea4b78866b..8caa8c0f1d 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -328,8 +328,6 @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
>      tlbfast_flush_locked(desc, fast);
>      desc->large_page_addr = -1;
>      desc->large_page_mask = -1;
> -    desc->vindex = 0;
> -    memset(desc->vtable, -1, sizeof(desc->vtable));
>      interval_tree_free_nodes(&desc->iroot, offsetof(CPUTLBEntryTree, itree));
>  }
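
For anyone reading this hunk without patch 01 handy: interval_tree_free_nodes()
takes the offset of the embedded IntervalTreeNode so it can free the structure
containing each node, which is why the tree side of the flush needs no
per-entry reset the way vtable did. A hypothetical body, just to make the hunk
self-explanatory (a sketch assuming qemu/interval-tree.h's iterator API, not
the code from patch 01):

    /* Sketch, not the real helper: walk every node, unlink it, and free
     * the containing structure, located via the caller-supplied offset
     * of the embedded IntervalTreeNode member. */
    void interval_tree_free_nodes(IntervalTreeRoot *root, size_t node_ofs)
    {
        IntervalTreeNode *n, *next;

        for (n = interval_tree_iter_first(root, 0, UINT64_MAX); n; n = next) {
            /* Fetch the successor before unlinking the current node. */
            next = interval_tree_iter_next(n, 0, UINT64_MAX);
            interval_tree_remove(n, root);
            g_free((char *)n - node_ofs);
        }
    }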
>
> @@ -361,11 +359,6 @@ static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
>      cpu->neg.tlb.d[mmu_idx].n_used_entries++;
>  }
>
> -static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
> -{
> -    cpu->neg.tlb.d[mmu_idx].n_used_entries--;
> -}
> -
>  void tlb_init(CPUState *cpu)
>  {
>      int64_t now = get_clock_realtime();
> @@ -496,20 +489,6 @@ static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
>              page == (tlb_entry->addr_code & mask));
>  }
>
> -static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
> -{
> -    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
> -}
> -
> -/**
> - * tlb_entry_is_empty - return true if the entry is not in use
> - * @te: pointer to CPUTLBEntry
> - */
> -static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
> -{
> -    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
> -}
> -
>  /* Called with tlb_c.lock held */
>  static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
>                                          vaddr page,
> @@ -522,28 +501,6 @@ static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
>      return false;
>  }
>
> -/* Called with tlb_c.lock held */
> -static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
> -                                            vaddr page,
> -                                            vaddr mask)
> -{
> -    CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
> -    int k;
> -
> -    assert_cpu_is_self(cpu);
> -    for (k = 0; k < CPU_VTLB_SIZE; k++) {
> -        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
> -            tlb_n_used_entries_dec(cpu, mmu_idx);
> -        }
> -    }
> -}
> -
> -static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
> -                                              vaddr page)
> -{
> -    tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
> -}
> -
>  static void tlbfast_flush_range_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
>                                         vaddr addr, vaddr len, vaddr mask)
>  {
> @@ -588,7 +545,6 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
>
>      tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
>                                 page, TARGET_PAGE_SIZE, -1);
> -    tlb_flush_vtlb_page_locked(cpu, midx, page);
> 
>      node = tlbtree_lookup_addr(desc, page);
>      if (node) {
> @@ -764,11 +720,6 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
>
>      tlbfast_flush_range_locked(d, f, addr, len, mask);
> 
> -    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
> -        vaddr page = addr + i;
> -        tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
> -    }
> -
>      addr_mask = addr & mask;
>      last_mask = addr_mask + len - 1;
>      last_imask = last_mask | ~mask;
> @@ -1017,10 +968,6 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
>              tlb_reset_dirty_range_locked(&fast->table[i], start1, length);
>          }
> 
> -        for (size_t i = 0; i < CPU_VTLB_SIZE; i++) {
> -            tlb_reset_dirty_range_locked(&desc->vtable[i], start1, length);
> -        }
> -
>          for (CPUTLBEntryTree *t = tlbtree_lookup_range(desc, 0, -1); t;
>               t = tlbtree_lookup_range_next(t, 0, -1)) {
>              tlb_reset_dirty_range_locked(&t->copy, start1, length);
> @@ -1054,10 +1001,6 @@ static void tlb_set_dirty(CPUState *cpu, vaddr addr)
>
>          tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
> 
> -        for (int k = 0; k < CPU_VTLB_SIZE; k++) {
> -            tlb_set_dirty1_locked(&desc->vtable[k], addr);
> -        }
> -
>          node = tlbtree_lookup_addr(desc, addr);
>          if (node) {
>              tlb_set_dirty1_locked(&node->copy, addr);
> @@ -1216,23 +1159,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
>      /* Note that the tlb is no longer clean. */
>      tlb->c.dirty |= 1 << mmu_idx;
> 
> -    /* Make sure there's no cached translation for the new page. */
> -    tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
> -
> -    /*
> -     * Only evict the old entry to the victim tlb if it's for a
> -     * different page; otherwise just overwrite the stale data.
> -     */
> -    if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
> -        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
> -        CPUTLBEntry *tv = &desc->vtable[vidx];
> -
> -        /* Evict the old entry into the victim tlb. */
> -        copy_tlb_helper_locked(tv, te);
> -        desc->vfulltlb[vidx] = desc->fulltlb[index];
> -        tlb_n_used_entries_dec(cpu, mmu_idx);
> -    }
> -
>      /* Replace an old IntervalTree entry, or create a new one. */
>      node = tlbtree_lookup_addr(desc, addr_page);
>      if (!node) {
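
With patches 09-17 mirroring every entry into the per-mmuidx IntervalTree and
the refill path probing it through tlbtree_hit, nothing reads the victim cache
any more, so this is pure deletion. For readers landing here mid-series, the
point query that supersedes the vtable scan looks roughly like this (a sketch
built on qemu/interval-tree.h; the actual tlbtree_lookup_addr in the series
may differ):

    /* Sketch only: CPUTLBEntryTree embeds an IntervalTreeNode ("itree"),
     * so the entry covering @addr is found with a single-address range
     * query against desc->iroot. */
    static CPUTLBEntryTree *tlbtree_lookup_addr(CPUTLBDesc *desc, vaddr addr)
    {
        IntervalTreeNode *n = interval_tree_iter_first(&desc->iroot, addr, addr);

        return n ? container_of(n, CPUTLBEntryTree, itree) : NULL;
    }

Unlike the 8-entry vtable ring, the tree keeps every active translation until
it is flushed, so dropping the eviction path above loses nothing.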
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>