From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH 04/23] accel/tcg: Split out tlbfast_flush_range_locked
Date: Wed, 9 Oct 2024 16:05:14 -0700
Message-ID: <8bbac2bf-704a-4c4c-ae7a-996f5a04038f@linaro.org>
In-Reply-To: <20241009150855.804605-5-richard.henderson@linaro.org>

On 10/9/24 08:08, Richard Henderson wrote:
> While this may at present be overly complicated for use
> by single page flushes, do so with the expectation that
> this will eventually allow simplification of large pages.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/cputlb.c | 61 +++++++++++++++++++++++++---------------------
>   1 file changed, 33 insertions(+), 28 deletions(-)
> 
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index e37af24525..6773874f2d 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -520,10 +520,37 @@ static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
>       tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
>   }
>   
> +static void tlbfast_flush_range_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
> +                                       vaddr addr, vaddr len, vaddr mask)
> +{
> +    /*
> +     * If @mask is smaller than the tlb size, there may be multiple entries
> +     * within the TLB; for now, just flush the entire TLB.
> +     * Otherwise all addresses that match under @mask hit the same TLB entry.
> +     *
> +     * If @len is larger than the tlb size, then it will take longer to
> +     * test all of the entries in the TLB than it will to flush it all.
> +     */
> +    if (mask < fast->mask || len > fast->mask) {
> +        tlbfast_flush_locked(desc, fast);
> +        return;
> +    }
> +
> +    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
> +        vaddr page = addr + i;
> +        CPUTLBEntry *entry = tlbfast_entry(fast, page);
> +
> +        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
> +            desc->n_used_entries--;
> +        }
> +    }
> +}
> +
>   static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
>   {
> -    vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
> -    vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
> +    CPUTLBDesc *desc = &cpu->neg.tlb.d[midx];
> +    vaddr lp_addr = desc->large_page_addr;
> +    vaddr lp_mask = desc->large_page_mask;
>   
>       /* Check if we need to flush due to large pages.  */
>       if ((page & lp_mask) == lp_addr) {
> @@ -532,9 +559,8 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
>                     midx, lp_addr, lp_mask);
>           tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
>       } else {
> -        if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
> -            tlb_n_used_entries_dec(cpu, midx);
> -        }
> +        tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
> +                                   page, TARGET_PAGE_SIZE, -1);
>           tlb_flush_vtlb_page_locked(cpu, midx, page);
>       }
>   }
> @@ -689,24 +715,6 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
>       CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
>       vaddr mask = MAKE_64BIT_MASK(0, bits);
>   
> -    /*
> -     * If @bits is smaller than the tlb size, there may be multiple entries
> -     * within the TLB; otherwise all addresses that match under @mask hit
> -     * the same TLB entry.
> -     * TODO: Perhaps allow bits to be a few bits less than the size.
> -     * For now, just flush the entire TLB.
> -     *
> -     * If @len is larger than the tlb size, then it will take longer to
> -     * test all of the entries in the TLB than it will to flush it all.
> -     */
> -    if (mask < f->mask || len > f->mask) {
> -        tlb_debug("forcing full flush midx %d ("
> -                  "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
> -                  midx, addr, mask, len);
> -        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
> -        return;
> -    }
> -
>       /*
>        * Check if we need to flush due to large pages.
>        * Because large_page_mask contains all 1's from the msb,
> @@ -720,13 +728,10 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
>           return;
>       }
>   
> +    tlbfast_flush_range_locked(d, f, addr, len, mask);
> +
>       for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
>           vaddr page = addr + i;
> -        CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
> -
> -        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
> -            tlb_n_used_entries_dec(cpu, midx);
> -        }
>           tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
>       }
>   }

Why don't we have the same kind of change for 
tlb_flush_vtlb_page_mask_locked?

We now have two loops (one for the entry mask, and one for the page mask).
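
Just to illustrate what I had in mind (untested sketch; it assumes cpu and
midx get passed down to tlbfast_flush_range_locked, or to some combined
helper, alongside desc/fast):

    /* single walk over the range, flushing both the fast and victim TLB */
    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
        vaddr page = addr + i;
        CPUTLBEntry *entry = tlbfast_entry(fast, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            desc->n_used_entries--;
        }
        tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
    }

so that tlb_flush_range_locked would not have to walk the range a second
time just for the victim entries.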



Thread overview: 60+ messages
2024-10-09 15:08 [RFC PATCH 00/23] accel/tcg: Convert victim tlb to IntervalTree Richard Henderson
2024-10-09 15:08 ` [PATCH 01/23] util/interval-tree: Introduce interval_tree_free_nodes Richard Henderson
2024-10-09 22:51   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 02/23] accel/tcg: Split out tlbfast_flush_locked Richard Henderson
2024-10-09 18:54   ` Philippe Mathieu-Daudé
2024-10-09 22:53   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 03/23] accel/tcg: Split out tlbfast_{index,entry} Richard Henderson
2024-10-09 22:55   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 04/23] accel/tcg: Split out tlbfast_flush_range_locked Richard Henderson
2024-10-09 23:05   ` Pierrick Bouvier [this message]
2024-10-10  1:20     ` Richard Henderson
2024-10-11 17:09       ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 05/23] accel/tcg: Fix flags usage in mmu_lookup1, atomic_mmu_lookup Richard Henderson
2024-10-09 23:18   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 06/23] accel/tcg: Early exit for zero length in tlb_flush_range_by_mmuidx* Richard Henderson
2024-10-09 18:53   ` Philippe Mathieu-Daudé
2024-10-09 23:20   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 07/23] accel/tcg: Flush entire tlb when a masked range wraps Richard Henderson
2024-10-09 23:28   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 08/23] accel/tcg: Add IntervalTreeRoot to CPUTLBDesc Richard Henderson
2024-10-09 23:31   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 09/23] accel/tcg: Populate IntervalTree in tlb_set_page_full Richard Henderson
2024-10-09 23:50   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 10/23] accel/tcg: Remove IntervalTree entry in tlb_flush_page_locked Richard Henderson
2024-10-09 23:53   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 11/23] accel/tcg: Remove IntervalTree entries in tlb_flush_range_locked Richard Henderson
2024-10-09 23:57   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 12/23] accel/tcg: Process IntervalTree entries in tlb_reset_dirty Richard Henderson
2024-10-10  0:03   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 13/23] accel/tcg: Process IntervalTree entries in tlb_set_dirty Richard Henderson
2024-10-10  0:04   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 14/23] accel/tcg: Replace victim_tlb_hit with tlbtree_hit Richard Henderson
2024-10-10  0:10   ` Pierrick Bouvier
2024-10-10 19:29     ` Richard Henderson
2024-10-11 17:11       ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 15/23] accel/tcg: Remove the victim tlb Richard Henderson
2024-10-10  0:12   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 16/23] include/exec/tlb-common: Move CPUTLBEntryFull from hw/core/cpu.h Richard Henderson
2024-10-10  0:17   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 17/23] accel/tcg: Delay plugin adjustment in probe_access_internal Richard Henderson
2024-10-10  0:19   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 18/23] accel/tcg: Call cpu_ld*_code_mmu from cpu_ld*_code Richard Henderson
2024-10-09 18:51   ` Philippe Mathieu-Daudé
2024-10-10  0:23   ` Pierrick Bouvier
2024-10-10 19:31     ` Richard Henderson
2024-10-09 15:08 ` [PATCH 19/23] accel/tcg: Always use IntervalTree for code lookups Richard Henderson
2024-10-10  0:35   ` Pierrick Bouvier
2024-10-11 14:47     ` Richard Henderson
2024-10-11 17:55       ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 20/23] accel/tcg: Link CPUTLBEntry to CPUTLBEntryTree Richard Henderson
2024-10-10  0:37   ` Pierrick Bouvier
2024-10-09 15:08 ` [PATCH 21/23] accel/tcg: Remove CPUTLBDesc.fulltlb Richard Henderson
2024-10-10  0:38   ` Pierrick Bouvier
2024-10-09 15:08 ` [NOTYET PATCH 22/23] accel/tcg: Drop TCGCPUOps.tlb_fill Richard Henderson
2024-10-10  0:40   ` Pierrick Bouvier
2024-10-09 15:08 ` [NOTYET PATCH 23/23] accel/tcg: Unexport tlb_set_page* Richard Henderson
2024-10-09 16:27 ` [RFC PATCH 00/23] accel/tcg: Convert victim tlb to IntervalTree BALATON Zoltan
2024-10-09 17:10   ` Richard Henderson
2024-10-10  0:50     ` Pierrick Bouvier
2024-10-15  0:07       ` Richard Henderson
