From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH v2 33/54] accel/tcg: Remove CPUTLBDesc.fulltlb
Date: Thu, 14 Nov 2024 10:49:47 -0800
Message-ID: <057e02e0-eb01-43b2-b48c-602bb84d0989@linaro.org>
In-Reply-To: <20241114160131.48616-34-richard.henderson@linaro.org>

On 11/14/24 08:01, Richard Henderson wrote:
> This array is now write-only, and may be removed.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   include/hw/core/cpu.h |  1 -
>   accel/tcg/cputlb.c    | 34 +++++++---------------------------
>   2 files changed, 7 insertions(+), 28 deletions(-)
> 
> diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
> index 4364ddb1db..5c069f2a00 100644
> --- a/include/hw/core/cpu.h
> +++ b/include/hw/core/cpu.h
> @@ -219,7 +219,6 @@ typedef struct CPUTLBDesc {
>       /* maximum number of entries observed in the window */
>       size_t window_max_entries;
>       size_t n_used_entries;
> -    CPUTLBEntryFull *fulltlb;
>       /* All active tlb entries for this address space. */
>       IntervalTreeRoot iroot;
>   } CPUTLBDesc;
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 3282436752..7f63dc3fd8 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -149,13 +149,6 @@ static inline CPUTLBEntry *tlbfast_entry(CPUTLBDescFast *fast, vaddr addr)
>       return fast->table + tlbfast_index(fast, addr);
>   }
>   
> -/* Find the TLB index corresponding to the mmu_idx + address pair.  */
> -static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
> -                                  vaddr addr)
> -{
> -    return tlbfast_index(&cpu->neg.tlb.f[mmu_idx], addr);
> -}
> -
>   /* Find the TLB entry corresponding to the mmu_idx + address pair.  */
>   static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
>                                        vaddr addr)
> @@ -270,22 +263,20 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
>       }
>   
>       g_free(fast->table);
> -    g_free(desc->fulltlb);
>   
>       tlb_window_reset(desc, now, 0);
>       /* desc->n_used_entries is cleared by the caller */
>       fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
>       fast->table = g_try_new(CPUTLBEntry, new_size);
> -    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
>   
>       /*
> -     * If the allocations fail, try smaller sizes. We just freed some
> +     * If the allocation fails, try smaller sizes. We just freed some
>        * memory, so going back to half of new_size has a good chance of working.
>        * Increased memory pressure elsewhere in the system might cause the
>        * allocations to fail though, so we progressively reduce the allocation
>        * size, aborting if we cannot even allocate the smallest TLB we support.
>        */
> -    while (fast->table == NULL || desc->fulltlb == NULL) {
> +    while (fast->table == NULL) {
>           if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
>               error_report("%s: %s", __func__, strerror(errno));
>               abort();
> @@ -294,9 +285,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
>           fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
>   
>           g_free(fast->table);
> -        g_free(desc->fulltlb);
>           fast->table = g_try_new(CPUTLBEntry, new_size);
> -        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
>       }
>   }
>   
> @@ -350,7 +339,6 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
>       desc->n_used_entries = 0;
>       fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
>       fast->table = g_new(CPUTLBEntry, n_entries);
> -    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
>       memset(&desc->iroot, 0, sizeof(desc->iroot));
>       tlb_mmu_flush_locked(desc, fast);
>   }
> @@ -372,15 +360,9 @@ void tlb_init(CPUState *cpu)
>   
>   void tlb_destroy(CPUState *cpu)
>   {
> -    int i;
> -
>       qemu_spin_destroy(&cpu->neg.tlb.c.lock);
> -    for (i = 0; i < NB_MMU_MODES; i++) {
> -        CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
> -        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
> -
> -        g_free(fast->table);
> -        g_free(desc->fulltlb);
> +    for (int i = 0; i < NB_MMU_MODES; i++) {
> +        g_free(cpu->neg.tlb.f[i].table);
>           interval_tree_free_nodes(&cpu->neg.tlb.d[i].iroot,
>                                    offsetof(CPUTLBEntryTree, itree));
>       }
> @@ -1061,7 +1043,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
>       CPUTLB *tlb = &cpu->neg.tlb;
>       CPUTLBDesc *desc = &tlb->d[mmu_idx];
>       MemoryRegionSection *section;
> -    unsigned int index, read_flags, write_flags;
> +    unsigned int read_flags, write_flags;
>       uintptr_t addend;
>       CPUTLBEntry *te;
>       CPUTLBEntryTree *node;
> @@ -1140,7 +1122,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
>       wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
>                                                 TARGET_PAGE_SIZE);
>   
> -    index = tlb_index(cpu, mmu_idx, addr_page);
>       te = tlb_entry(cpu, mmu_idx, addr_page);
>   
>       /*
> @@ -1179,8 +1160,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
>        * subtract here is that of the page base, and not the same as the
>        * vaddr we add back in io_prepare()/get_page_addr_code().
>        */
> -    desc->fulltlb[index] = *full;
> -    full = &desc->fulltlb[index];
> +    node->full = *full;
> +    full = &node->full;
>       full->xlat_section = iotlb - addr_page;
>       full->phys_addr = paddr_page;
>   
> @@ -1203,7 +1184,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
>       tlb_set_compare(full, &node->copy, addr_page, write_flags,
>                       MMU_DATA_STORE, prot & PAGE_WRITE);
>   
> -    node->full = *full;
>       copy_tlb_helper_locked(te, &node->copy);
>       desc->n_used_entries++;
>       qemu_spin_unlock(&tlb->c.lock);

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>


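For readers skimming the thread, the gist of the change, as a sketch inferred only from the quoted diff and the rest of the series (the real QEMU headers may differ): patch 32/54 linked each fast-table CPUTLBEntry to its per-page CPUTLBEntryTree node, which left the parallel fulltlb[] array write-only, so tlb_set_page_full can store the CPUTLBEntryFull data in the tree node instead of the array.

    /* Sketch only -- fields inferred from the diff, not the actual header. */
    struct CPUTLBEntryTree {
        IntervalTreeNode itree;    /* keyed by the guest virtual page range */
        CPUTLBEntry      copy;     /* copied into the fast-table slot       */
        CPUTLBEntryFull  full;     /* now the only copy of the page's data  */
    };

    /* In tlb_set_page_full(), the old parallel-array store ...             */
    /*     desc->fulltlb[index] = *full;                                    */
    /*     full = &desc->fulltlb[index];                                    */
    /* ... becomes a store into the per-page tree node:                     */
    /*     node->full = *full;                                              */
    /*     full = &node->full;                                              */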


Thread overview: 114+ messages
2024-11-14 16:00 [PATCH for-10.0 v2 00/54] accel/tcg: Convert victim tlb to IntervalTree Richard Henderson
2024-11-14 16:00 ` [PATCH v2 01/54] util/interval-tree: Introduce interval_tree_free_nodes Richard Henderson
2024-11-14 17:51   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 02/54] accel/tcg: Split out tlbfast_flush_locked Richard Henderson
2024-11-14 17:52   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 03/54] accel/tcg: Split out tlbfast_{index,entry} Richard Henderson
2024-11-14 17:52   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 04/54] accel/tcg: Split out tlbfast_flush_range_locked Richard Henderson
2024-11-14 16:00 ` [PATCH v2 05/54] accel/tcg: Fix flags usage in mmu_lookup1, atomic_mmu_lookup Richard Henderson
2024-11-14 17:54   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 06/54] accel/tcg: Assert non-zero length in tlb_flush_range_by_mmuidx* Richard Henderson
2024-11-14 17:56   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 07/54] accel/tcg: Assert bits in range " Richard Henderson
2024-11-14 17:56   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 08/54] accel/tcg: Flush entire tlb when a masked range wraps Richard Henderson
2024-11-14 17:58   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 09/54] accel/tcg: Add IntervalTreeRoot to CPUTLBDesc Richard Henderson
2024-11-14 17:59   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 10/54] accel/tcg: Populate IntervalTree in tlb_set_page_full Richard Henderson
2024-11-14 18:00   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 11/54] accel/tcg: Remove IntervalTree entry in tlb_flush_page_locked Richard Henderson
2024-11-14 18:01   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 12/54] accel/tcg: Remove IntervalTree entries in tlb_flush_range_locked Richard Henderson
2024-11-14 18:01   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 13/54] accel/tcg: Process IntervalTree entries in tlb_reset_dirty Richard Henderson
2024-11-14 18:02   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 14/54] accel/tcg: Process IntervalTree entries in tlb_set_dirty Richard Henderson
2024-11-14 18:02   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 15/54] accel/tcg: Use tlb_hit_page in victim_tlb_hit Richard Henderson
2024-11-14 18:03   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 16/54] accel/tcg: Pass full addr to victim_tlb_hit Richard Henderson
2024-11-14 18:04   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 17/54] accel/tcg: Replace victim_tlb_hit with tlbtree_hit Richard Henderson
2024-11-14 18:06   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 18/54] accel/tcg: Remove the victim tlb Richard Henderson
2024-11-14 18:07   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 19/54] accel/tcg: Remove tlb_n_used_entries_inc Richard Henderson
2024-11-14 18:07   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 20/54] include/exec/tlb-common: Move CPUTLBEntryFull from hw/core/cpu.h Richard Henderson
2024-11-14 18:08   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 21/54] accel/tcg: Delay plugin adjustment in probe_access_internal Richard Henderson
2024-11-14 18:09   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 22/54] accel/tcg: Call cpu_ld*_code_mmu from cpu_ld*_code Richard Henderson
2024-11-14 18:09   ` Pierrick Bouvier
2024-11-14 16:00 ` [PATCH v2 23/54] accel/tcg: Check original prot bits for read in atomic_mmu_lookup Richard Henderson
2024-11-14 18:09   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 24/54] accel/tcg: Preserve tlb flags in tlb_set_compare Richard Henderson
2024-11-14 18:11   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 25/54] accel/tcg: Return CPUTLBEntryFull not pointer in probe_access_full_mmu Richard Henderson
2024-11-14 18:11   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 26/54] accel/tcg: Return CPUTLBEntryFull not pointer in probe_access_full Richard Henderson
2024-11-14 18:12   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 27/54] accel/tcg: Return CPUTLBEntryFull not pointer in probe_access_internal Richard Henderson
2024-11-14 18:13   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 28/54] accel/tcg: Introduce tlb_lookup Richard Henderson
2024-11-14 18:29   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 29/54] accel/tcg: Partially unify MMULookupPageData and TLBLookupOutput Richard Henderson
2024-11-14 18:29   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 30/54] accel/tcg: Merge mmu_lookup1 into mmu_lookup Richard Henderson
2024-11-14 18:31   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 31/54] accel/tcg: Always use IntervalTree for code lookups Richard Henderson
2024-11-14 18:32   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 32/54] accel/tcg: Link CPUTLBEntry to CPUTLBEntryTree Richard Henderson
2024-11-14 18:39   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 33/54] accel/tcg: Remove CPUTLBDesc.fulltlb Richard Henderson
2024-11-14 18:49   ` Pierrick Bouvier [this message]
2024-11-14 16:01 ` [PATCH v2 34/54] target/alpha: Convert to TCGCPUOps.tlb_fill_align Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 35/54] target/avr: " Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 36/54] target/i386: " Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 37/54] target/loongarch: " Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 38/54] target/m68k: " Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 39/54] target/m68k: Do not call tlb_set_page in helper_ptest Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 40/54] target/microblaze: Convert to TCGCPUOps.tlb_fill_align Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 41/54] target/mips: " Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 42/54] target/openrisc: " Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 43/54] target/ppc: " Richard Henderson
2024-11-14 18:53   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 44/54] target/riscv: " Richard Henderson
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 45/54] target/rx: " Richard Henderson
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 46/54] target/s390x: " Richard Henderson
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 47/54] target/sh4: " Richard Henderson
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 48/54] target/sparc: " Richard Henderson
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 49/54] target/tricore: " Richard Henderson
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 50/54] target/xtensa: " Richard Henderson
2024-11-14 18:54   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 51/54] accel/tcg: Drop TCGCPUOps.tlb_fill Richard Henderson
2024-11-14 18:55   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 52/54] accel/tcg: Unexport tlb_set_page* Richard Henderson
2024-11-14 18:56   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 53/54] accel/tcg: Merge tlb_fill_align into callers Richard Henderson
2024-11-14 18:57   ` Pierrick Bouvier
2024-11-14 16:01 ` [PATCH v2 54/54] accel/tcg: Return CPUTLBEntryTree from tlb_set_page_full Richard Henderson
2024-11-14 18:59   ` Pierrick Bouvier
2024-11-14 19:56 ` [PATCH for-10.0 v2 00/54] accel/tcg: Convert victim tlb to IntervalTree Pierrick Bouvier
2024-11-14 20:58   ` Richard Henderson
2024-11-14 21:05     ` Pierrick Bouvier
2024-11-15 11:43       ` Alex Bennée
2024-11-15 17:44         ` Pierrick Bouvier
