From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: alex.bennee@linaro.org, pbonzini@redhat.com,
mark.cave-ayland@ilande.co.uk, f4bug@amsat.org
Subject: [PATCH 03/15] accel/tcg: Use byte ops for unaligned loads
Date: Sat, 19 Jun 2021 10:26:14 -0700 [thread overview]
Message-ID: <20210619172626.875885-4-richard.henderson@linaro.org> (raw)
In-Reply-To: <20210619172626.875885-1-richard.henderson@linaro.org>
From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
If an unaligned load is required, the load is split into
two separate aligned accesses and the results are combined.
This does not work correctly for MMIO accesses, because the
I/O subsystem may use a different endianness than the one we expect.
Use byte loads instead, which sidesteps the I/O endianness
problem entirely. We already use byte stores in
store_helper_unaligned, so this approach has precedent.
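
For illustration only (not part of the patch): a minimal sketch of the
byte-at-a-time approach, with a hypothetical byte_fetch_fn callback
standing in for the real full_ldub_mmu helper. The point is that each
device access is a single byte, so the I/O subsystem has no multi-byte
value to swap; the guest-visible endianness is produced purely by how
the bytes are combined here.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* Hypothetical stand-in for full_ldub_mmu(): fetch one byte. */
typedef uint8_t (*byte_fetch_fn)(uint64_t addr);

static uint64_t load_unaligned_bytewise(uint64_t addr, size_t size,
                                        bool big_endian,
                                        byte_fetch_fn fetch)
{
    uint64_t val = 0;

    if (big_endian) {
        for (size_t i = 0; i < size; ++i) {
            /* Most significant byte first. */
            val = (val << 8) | fetch(addr + i);
        }
    } else {
        for (size_t i = 0; i < size; ++i) {
            /* Least significant byte first. */
            val |= (uint64_t)fetch(addr + i) << (i * 8);
        }
    }
    return val;
}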
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/360
Message-Id: <20210609093528.9616-1-mark.cave-ayland@ilande.co.uk>
[PMD: Extract load_helper_unaligned() in earlier patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210609141010.1066750-3-f4bug@amsat.org>
[rth: Drop all of the stuff we do for stores not required by loads.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 93 ++++++++++++++++++----------------------------
1 file changed, 36 insertions(+), 57 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a94de90099..ba21487138 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1854,35 +1854,36 @@ load_memop(const void *haddr, MemOp op)
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper_unaligned(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
- FullLoadHelper *full_load)
+ FullLoadHelper *byte_load)
{
+ uintptr_t mmu_idx = get_mmuidx(oi);
size_t size = memop_size(op);
- target_ulong addr1, addr2;
- uint64_t res;
- uint64_t r1, r2;
- unsigned shift;
-
- addr1 = addr & ~((target_ulong)size - 1);
- addr2 = addr1 + size;
- r1 = full_load(env, addr1, oi, retaddr);
- r2 = full_load(env, addr2, oi, retaddr);
- shift = (addr & (size - 1)) * 8;
+ uint64_t val = 0;
+ int i;
+    /* XXX: not efficient, but simple. */
+    oi = make_memop_idx(MO_UB, mmu_idx);
if (memop_big_endian(op)) {
- /* Big-endian combine. */
- res = (r1 << shift) | (r2 >> ((size * 8) - shift));
+ for (i = 0; i < size; ++i) {
+ /* Big-endian load. */
+ uint64_t val8 = byte_load(env, addr + i, oi, retaddr);
+ val = (val << 8) | val8;
+ }
} else {
- /* Little-endian combine. */
- res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+ for (i = 0; i < size; ++i) {
+ /* Little-endian load. */
+ uint64_t val8 = byte_load(env, addr + i, oi, retaddr);
+ val |= val8 << (i * 8);
+ }
}
- return res & MAKE_64BIT_MASK(0, size * 8);
+ return val;
}
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
- FullLoadHelper *full_load)
+ FullLoadHelper *byte_load)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1920,10 +1921,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
CPUIOTLBEntry *iotlbentry;
bool need_swap;
- /* For anything that is unaligned, recurse through full_load. */
+ /* For anything that is unaligned, recurse through byte_load. */
if ((addr & (size - 1)) != 0) {
return load_helper_unaligned(env, addr, oi, retaddr, op,
- code_read, full_load);
+ code_read, byte_load);
}
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
@@ -1961,7 +1962,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
&& unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
>= TARGET_PAGE_SIZE)) {
return load_helper_unaligned(env, addr, oi, retaddr, op,
- code_read, full_load);
+ code_read, byte_load);
}
haddr = (void *)((uintptr_t)addr + entry->addend);
@@ -1978,8 +1979,9 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
* We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
*/
-static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t __attribute__((noinline))
+full_ldub_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}
@@ -1993,8 +1995,7 @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
- full_le_lduw_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_LEUW, false, full_ldub_mmu);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
@@ -2006,8 +2007,7 @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
- full_be_lduw_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_BEUW, false, full_ldub_mmu);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
@@ -2019,8 +2019,7 @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
- full_le_ldul_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_LEUL, false, full_ldub_mmu);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
@@ -2032,8 +2031,7 @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
- full_be_ldul_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_BEUL, false, full_ldub_mmu);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
@@ -2045,15 +2043,13 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
- helper_le_ldq_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_LEQ, false, full_ldub_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
- helper_be_ldq_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_BEQ, false, full_ldub_mmu);
}
/*
@@ -2732,8 +2728,9 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
/* Code access functions. */
-static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t __attribute__((noinline))
+full_ldub_code(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
@@ -2744,38 +2741,20 @@ uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
return full_ldub_code(env, addr, oi, 0);
}
-static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
-}
-
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
- return full_lduw_code(env, addr, oi, 0);
-}
-
-static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
+ return load_helper(env, addr, oi, 0, MO_TEUW, true, full_ldub_code);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
- return full_ldl_code(env, addr, oi, 0);
-}
-
-static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
+ return load_helper(env, addr, oi, 0, MO_TEUL, true, full_ldub_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
- return full_ldq_code(env, addr, oi, 0);
+ return load_helper(env, addr, oi, 0, MO_TEQ, true, full_ldub_code);
}
--
2.25.1
Thread overview: 23+ messages
2021-06-19 17:26 [PATCH 00/15] accel/tcg: Fix for #360 and other i/o alignment issues Richard Henderson
2021-06-19 17:26 ` [PATCH 01/15] NOTFORMERGE q800: test case for do_unaligned_access issue Richard Henderson
2021-06-19 17:26 ` [PATCH 02/15] accel/tcg: Extract load_helper_unaligned from load_helper Richard Henderson
2021-06-19 17:26 ` Richard Henderson [this message]
2021-06-19 17:26 ` [PATCH 04/15] accel/tcg: Don't test for watchpoints for code read Richard Henderson
2021-06-21 18:29 ` Philippe Mathieu-Daudé
2021-06-19 17:26 ` [PATCH 05/15] accel/tcg: Handle page span access before i/o access Richard Henderson
2021-06-19 17:26 ` [PATCH 06/15] softmmu/memory: Inline memory_region_dispatch_read1 Richard Henderson
2021-06-21 18:25 ` Philippe Mathieu-Daudé
2021-06-19 17:26 ` [PATCH 07/15] softmmu/memory: Simplify access_with_adjusted_size interface Richard Henderson
2021-06-21 18:27 ` Philippe Mathieu-Daudé
2021-06-19 17:26 ` [PATCH 08/15] hw/net/e1000e: Fix size of io operations Richard Henderson
2021-06-19 17:26 ` [PATCH 09/15] hw/net/e1000e: Fix impl.min_access_size Richard Henderson
2021-06-21 7:20 ` Jason Wang
2021-06-19 17:26 ` [PATCH 10/15] hw/pci-host/q35: Improve blackhole_ops Richard Henderson
2021-06-21 18:31 ` Philippe Mathieu-Daudé
2021-06-19 17:26 ` [PATCH 11/15] hw/scsi/megasas: Fix megasas_mmio_ops sizes Richard Henderson
2021-06-19 17:26 ` [PATCH 12/15] hw/scsi/megasas: Improve megasas_queue_ops min_access_size Richard Henderson
2021-06-19 17:26 ` [PATCH 13/15] softmmu/memory: Disallow short writes Richard Henderson
2021-06-19 17:26 ` [PATCH 14/15] softmmu/memory: Support some unaligned access Richard Henderson
2021-06-19 17:26 ` [PATCH 15/15] RFC accel/tcg: Defer some unaligned accesses to memory subsystem Richard Henderson
2021-06-20 13:08 ` [PATCH 00/15] accel/tcg: Fix for #360 and other i/o alignment issues Mark Cave-Ayland
2021-06-20 14:33 ` Peter Maydell