From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH v2 48/54] target/sparc: Convert to TCGCPUOps.tlb_fill_align
Date: Thu, 14 Nov 2024 10:54:31 -0800
Message-ID: <b62678d5-59e2-44aa-a09a-5fc3960e20c1@linaro.org>
In-Reply-To: <20241114160131.48616-49-richard.henderson@linaro.org>
On 11/14/24 08:01, Richard Henderson wrote:
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/sparc/cpu.h        |  8 ++++---
>  target/sparc/cpu.c        |  2 +-
>  target/sparc/mmu_helper.c | 44 +++++++++++++++++++++++++--------------
>  3 files changed, 34 insertions(+), 20 deletions(-)
>
> diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
> index f517e5a383..4c8927e9fa 100644
> --- a/target/sparc/cpu.h
> +++ b/target/sparc/cpu.h
> @@ -4,6 +4,7 @@
> #include "qemu/bswap.h"
> #include "cpu-qom.h"
> #include "exec/cpu-defs.h"
> +#include "exec/memop.h"
> #include "qemu/cpu-float.h"
>
> #if !defined(TARGET_SPARC64)
> @@ -596,9 +597,10 @@ G_NORETURN void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t);
> void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
> void sparc_cpu_list(void);
> /* mmu_helper.c */
> -bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> - MMUAccessType access_type, int mmu_idx,
> - bool probe, uintptr_t retaddr);
> +bool sparc_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
> + vaddr addr, MMUAccessType access_type,
> + int mmu_idx, MemOp memop, int size,
> + bool probe, uintptr_t retaddr);
> target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
> void dump_mmu(CPUSPARCState *env);
>
> diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
> index dd7af86de7..57ae53bd71 100644
> --- a/target/sparc/cpu.c
> +++ b/target/sparc/cpu.c
> @@ -932,7 +932,7 @@ static const TCGCPUOps sparc_tcg_ops = {
> .restore_state_to_opc = sparc_restore_state_to_opc,
>
> #ifndef CONFIG_USER_ONLY
> - .tlb_fill = sparc_cpu_tlb_fill,
> + .tlb_fill_align = sparc_cpu_tlb_fill_align,
> .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
> .cpu_exec_halt = sparc_cpu_has_work,
> .do_interrupt = sparc_cpu_do_interrupt,
> diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
> index 9ff06026b8..32766a37d6 100644
> --- a/target/sparc/mmu_helper.c
> +++ b/target/sparc/mmu_helper.c
> @@ -203,12 +203,12 @@ static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
> }
>
> /* Perform address translation */
> -bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> - MMUAccessType access_type, int mmu_idx,
> - bool probe, uintptr_t retaddr)
> +bool sparc_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
> + vaddr address, MMUAccessType access_type,
> + int mmu_idx, MemOp memop, int size,
> + bool probe, uintptr_t retaddr)
> {
> CPUSPARCState *env = cpu_env(cs);
> - CPUTLBEntryFull full = {};
> target_ulong vaddr;
> int error_code = 0, access_index;
>
> @@ -220,16 +220,21 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> */
> assert(!probe);
>
> + if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
> + sparc_cpu_do_unaligned_access(cs, address, access_type,
> + mmu_idx, retaddr);
> + }
> +
> + memset(out, 0, sizeof(*out));
> address &= TARGET_PAGE_MASK;
> - error_code = get_physical_address(env, &full, &access_index,
> + error_code = get_physical_address(env, out, &access_index,
> address, access_type, mmu_idx);
> vaddr = address;
> if (likely(error_code == 0)) {
> qemu_log_mask(CPU_LOG_MMU,
> "Translate at %" VADDR_PRIx " -> "
> HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
> - address, full.phys_addr, vaddr);
> - tlb_set_page_full(cs, mmu_idx, vaddr, &full);
> + address, out->phys_addr, vaddr);
> return true;
> }
>
> @@ -244,8 +249,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> permissions. If no mapping is available, redirect accesses to
> neverland. Fake/overridden mappings will be flushed when
> switching to normal mode. */
> - full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
> - tlb_set_page_full(cs, mmu_idx, vaddr, &full);
> + out->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
> return true;
> } else {
> if (access_type == MMU_INST_FETCH) {
> @@ -754,22 +758,30 @@ static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
> }
>
> /* Perform address translation */
> -bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> - MMUAccessType access_type, int mmu_idx,
> - bool probe, uintptr_t retaddr)
> +bool sparc_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
> + vaddr address, MMUAccessType access_type,
> + int mmu_idx, MemOp memop, int size,
> + bool probe, uintptr_t retaddr)
> {
> CPUSPARCState *env = cpu_env(cs);
> - CPUTLBEntryFull full = {};
> int error_code = 0, access_index;
>
> + if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
> + if (probe) {
> + return false;
> + }
> + sparc_cpu_do_unaligned_access(cs, address, access_type,
> + mmu_idx, retaddr);
> + }
> +
> + memset(out, 0, sizeof(*out));
> address &= TARGET_PAGE_MASK;
> - error_code = get_physical_address(env, &full, &access_index,
> + error_code = get_physical_address(env, out, &access_index,
> address, access_type, mmu_idx);
> if (likely(error_code == 0)) {
> - trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl,
> + trace_mmu_helper_mmu_fault(address, out->phys_addr, mmu_idx, env->tl,
> env->dmmu.mmu_primary_context,
> env->dmmu.mmu_secondary_context);
> - tlb_set_page_full(cs, mmu_idx, address, &full);
> return true;
> }
> if (probe) {
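The conversion looks right to me: filling *out and letting the caller install
the entry (instead of calling tlb_set_page_full here) matches the rest of the
series as I understand it, and the sparc32 path keeps its assert(!probe), so
only the sparc64 path needs the early return for the probe case.

One note purely for my own understanding of the new alignment check, not a
request for changes -- a minimal sketch, assuming memop_alignment_bits()
returns the log2 alignment encoded in the MemOp (e.g. 2 for a 4-byte MO_ALIGN
access, 0 when no alignment is required):

    /* Hypothetical example, not part of the patch. */
    MemOp memop = MO_32 | MO_ALIGN;                /* aligned 4-byte access */
    unsigned abits = memop_alignment_bits(memop);  /* 2 under that assumption */
    vaddr mask = (1 << abits) - 1;                 /* 0x3 */

    if (address & mask) {
        /* misaligned: the patch raises sparc_cpu_do_unaligned_access() */
    }

so a MemOp without an alignment requirement yields mask 0 and never faults
here, which is the behaviour I'd expect.
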
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>