From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [PULL 25/39] accel/tcg: Split out io_prepare and io_failed
Date: Fri, 15 Sep 2023 20:29:57 -0700
Message-ID: <20230916033011.479144-26-richard.henderson@linaro.org>
In-Reply-To: <20230916033011.479144-1-richard.henderson@linaro.org>
Split out the code that is common to io_readx() and io_writex().
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 77 +++++++++++++++++++++++++++-------------------
1 file changed, 45 insertions(+), 32 deletions(-)
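As a reading aid, here is a simplified, standalone sketch of the call
structure this patch produces: the read and write paths share a common
prologue (io_prepare) and a common error path (io_failed). The types,
signatures, and bodies below are placeholders invented for illustration
only; the real QEMU definitions are in the diff that follows, and only
the function names and the overall shape are taken from this patch.

  /* Illustrative sketch only: stand-in types, not QEMU's. */
  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t hwaddr;
  typedef struct { hwaddr base; } Section;  /* stands in for MemoryRegionSection */

  /* Shared prologue: resolve the section and the offset within it. */
  static Section *io_prepare(hwaddr *out_offset, hwaddr xlat, uint64_t addr)
  {
      static Section s = { .base = 0x1000 };
      *out_offset = (xlat & ~0xfffULL) + addr;
      return &s;
  }

  /* Shared error path: report a failed transaction once, in one place. */
  static void io_failed(Section *section, hwaddr mr_offset, int response)
  {
      hwaddr physaddr = section->base + mr_offset;
      fprintf(stderr, "I/O failed at 0x%llx (response %d)\n",
              (unsigned long long)physaddr, response);
  }

  /* Read path: prologue, dispatch, shared error handling. */
  static uint64_t io_readx(hwaddr xlat, uint64_t addr)
  {
      hwaddr mr_offset;
      Section *section = io_prepare(&mr_offset, xlat, addr);
      int r = 0;                     /* pretend the dispatch succeeded */
      uint64_t val = 42;
      if (r != 0) {
          io_failed(section, mr_offset, r);
      }
      return val;
  }

  /* Write path: identical structure, no duplicated prologue or error code. */
  static void io_writex(hwaddr xlat, uint64_t addr, uint64_t val)
  {
      hwaddr mr_offset;
      Section *section = io_prepare(&mr_offset, xlat, addr);
      int r = -1;                    /* pretend the dispatch failed */
      (void)val;
      if (r != 0) {
          io_failed(section, mr_offset, r);
      }
  }

  int main(void)
  {
      printf("read: %llu\n", (unsigned long long)io_readx(0x2000, 0x10));
      io_writex(0x2000, 0x10, 7);
      return 0;
  }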
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 9cbcd202d2..ae4ad591fe 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1267,7 +1267,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* (non-page-aligned) vaddr of the eventual memory access to get
* the MemoryRegion offset for the access. Note that the vaddr we
* subtract here is that of the page base, and not the same as the
- * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
+ * vaddr we add back in io_prepare()/get_page_addr_code().
*/
desc->fulltlb[index] = *full;
full = &desc->fulltlb[index];
@@ -1367,37 +1367,60 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
}
}
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, vaddr addr, uintptr_t retaddr,
- MMUAccessType access_type, MemOp op)
+static MemoryRegionSection *
+io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
+ MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
CPUState *cpu = env_cpu(env);
- hwaddr mr_offset;
MemoryRegionSection *section;
- MemoryRegion *mr;
- uint64_t val;
- MemTxResult r;
+ hwaddr mr_offset;
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
- mr = section->mr;
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
+ section = iotlb_to_section(cpu, xlat, attrs);
+ mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
if (!cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
+ *out_offset = mr_offset;
+ return section;
+}
+
+static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
+ unsigned size, MMUAccessType access_type, int mmu_idx,
+ MemTxResult response, uintptr_t retaddr,
+ MemoryRegionSection *section, hwaddr mr_offset)
+{
+ hwaddr physaddr = (mr_offset +
+ section->offset_within_address_space -
+ section->offset_within_region);
+
+ cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
+ mmu_idx, full->attrs, response, retaddr);
+}
+
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
+ int mmu_idx, vaddr addr, uintptr_t retaddr,
+ MMUAccessType access_type, MemOp op)
+{
+ MemoryRegionSection *section;
+ hwaddr mr_offset;
+ MemoryRegion *mr;
+ MemTxResult r;
+ uint64_t val;
+
+ section = io_prepare(&mr_offset, env, full->xlat_section,
+ full->attrs, addr, retaddr);
+ mr = section->mr;
+
{
QEMU_IOTHREAD_LOCK_GUARD();
r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
}
if (r != MEMTX_OK) {
- hwaddr physaddr = mr_offset +
- section->offset_within_address_space -
- section->offset_within_region;
-
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
- mmu_idx, full->attrs, r, retaddr);
+ io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
+ r, retaddr, section, mr_offset);
}
return val;
}
@@ -1406,19 +1429,14 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, uint64_t val, vaddr addr,
uintptr_t retaddr, MemOp op)
{
- CPUState *cpu = env_cpu(env);
- hwaddr mr_offset;
MemoryRegionSection *section;
+ hwaddr mr_offset;
MemoryRegion *mr;
MemTxResult r;
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
+ section = io_prepare(&mr_offset, env, full->xlat_section,
+ full->attrs, addr, retaddr);
mr = section->mr;
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
- if (!cpu->can_do_io) {
- cpu_io_recompile(cpu, retaddr);
- }
- cpu->mem_io_pc = retaddr;
{
QEMU_IOTHREAD_LOCK_GUARD();
@@ -1426,13 +1444,8 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
}
if (r != MEMTX_OK) {
- hwaddr physaddr = mr_offset +
- section->offset_within_address_space -
- section->offset_within_region;
-
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
- MMU_DATA_STORE, mmu_idx, full->attrs, r,
- retaddr);
+ io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
+ r, retaddr, section, mr_offset);
}
}
--
2.34.1