From: "Alex Bennée" <alex.bennee@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org, mark.cave-ayland@ilande.co.uk,
cota@braap.org, "Alex Bennée" <alex.bennee@linaro.org>
Subject: [Qemu-devel] [PATCH v5 07/15] accel/tcg: demacro cputlb
Date: Tue, 30 Apr 2019 17:52:26 +0100 [thread overview]
Message-ID: <20190430165234.32272-8-alex.bennee@linaro.org> (raw)
In-Reply-To: <20190430165234.32272-1-alex.bennee@linaro.org>
Instead of expanding a series of macros to generate the load/store
helpers, we move the logic into common functions and rely on the
compiler to eliminate the dead code for each variant.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
v5
- merged rth's fixes:
- cast to uint64_t instead of tcg_target_ulong
- make haddr void * instead of uintptr_t (reduce casting)
- common & size_mask
---
accel/tcg/cputlb.c | 478 ++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 452 insertions(+), 26 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f2f618217d..12f21865ee 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1168,26 +1168,421 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
#ifdef TARGET_WORDS_BIGENDIAN
-# define TGT_BE(X) (X)
-# define TGT_LE(X) BSWAP(X)
+#define NEED_BE_BSWAP 0
+#define NEED_LE_BSWAP 1
#else
-# define TGT_BE(X) BSWAP(X)
-# define TGT_LE(X) (X)
+#define NEED_BE_BSWAP 1
+#define NEED_LE_BSWAP 0
#endif
-#define MMUSUFFIX _mmu
+/*
+ * Byte Swap Helper
+ *
+ * All of this should be eliminated as dead code depending on the
+ * build host endianness and the access type.
+ */
-#define DATA_SIZE 1
-#include "softmmu_template.h"
+static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
+{
+ if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
+ switch (size) {
+ case 1: return val;
+ case 2: return bswap16(val);
+ case 4: return bswap32(val);
+ case 8: return bswap64(val);
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ return val;
+ }
+}
-#define DATA_SIZE 2
-#include "softmmu_template.h"
+/*
+ * Load Helpers
+ *
+ * We support two different access types. SOFTMMU_CODE_ACCESS is
+ * specifically for reading instructions from system memory. It is
+ * called by the translation loop and in some helpers where the code
+ * is disassembled. It shouldn't be called directly by guest code.
+ */
-#define DATA_SIZE 4
-#include "softmmu_template.h"
+static uint64_t load_helper(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr,
+ size_t size, bool big_endian,
+ bool code_read)
+{
+ uintptr_t mmu_idx = get_mmuidx(oi);
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ const size_t tlb_off = code_read ?
+ offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
+ void *haddr;
+ uint64_t res;
+
+ /* Handle CPU specific unaligned behaviour */
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr,
+ code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
+ mmu_idx, retaddr);
+ }
-#define DATA_SIZE 8
-#include "softmmu_template.h"
+ /* If the TLB entry is for a different page, reload and try again. */
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+ addr & TARGET_PAGE_MASK)) {
+ tlb_fill(ENV_GET_CPU(env), addr, size,
+ code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
+ mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ }
+
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+ uint64_t tmp;
+
+ if ((addr & (size - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+
+ tmp = io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+ tlb_addr & TLB_RECHECK,
+ code_read ? MMU_INST_FETCH : MMU_DATA_LOAD, size);
+ return handle_bswap(tmp, size, big_endian);
+ }
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (size > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+ >= TARGET_PAGE_SIZE)) {
+ target_ulong addr1, addr2;
+ tcg_target_ulong r1, r2;
+ unsigned shift;
+ do_unaligned_access:
+ addr1 = addr & ~(size - 1);
+ addr2 = addr1 + size;
+ r1 = load_helper(env, addr1, oi, retaddr, size, big_endian, code_read);
+ r2 = load_helper(env, addr2, oi, retaddr, size, big_endian, code_read);
+ shift = (addr & (size - 1)) * 8;
+
+ if (big_endian) {
+ /* Big-endian combine. */
+ res = (r1 << shift) | (r2 >> ((size * 8) - shift));
+ } else {
+ /* Little-endian combine. */
+ res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+ }
+ return res & MAKE_64BIT_MASK(0, size * 8);
+ }
+
+ haddr = (void *)((uintptr_t)addr + entry->addend);
+
+ switch (size) {
+ case 1:
+ res = ldub_p(haddr);
+ break;
+ case 2:
+ if (big_endian) {
+ res = lduw_be_p(haddr);
+ } else {
+ res = lduw_le_p(haddr);
+ }
+ break;
+ case 4:
+ if (big_endian) {
+ res = (uint32_t)ldl_be_p(haddr);
+ } else {
+ res = (uint32_t)ldl_le_p(haddr);
+ }
+ break;
+ case 8:
+ if (big_endian) {
+ res = ldq_be_p(haddr);
+ } else {
+ res = ldq_le_p(haddr);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return res;
+}
+
+/*
+ * For the benefit of TCG generated code, we want to avoid the
+ * complication of ABI-specific return type promotion and always
+ * return a value extended to the register size of the host. This is
+ * tcg_target_long, except in the case of a 32-bit host and 64-bit
+ * data, and for that we always have uint64_t.
+ *
+ * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
+ */
+
+tcg_target_ulong __attribute__((flatten))
+helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 1, false, false);
+}
+
+tcg_target_ulong __attribute__((flatten))
+helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, false, false);
+}
+
+tcg_target_ulong __attribute__((flatten))
+helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, true, false);
+}
+
+tcg_target_ulong __attribute__((flatten))
+helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, false, false);
+}
+
+tcg_target_ulong __attribute__((flatten))
+helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, true, false);
+}
+
+uint64_t __attribute__((flatten))
+helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, false, false);
+}
+
+uint64_t __attribute__((flatten))
+helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, true, false);
+}
+
+/*
+ * Provide signed versions of the load routines as well. We can of course
+ * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
+ */
+
+
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
+}
+
+/*
+ * Store Helpers
+ */
+
+static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr, size_t size,
+ bool big_endian)
+{
+ uintptr_t mmu_idx = get_mmuidx(oi);
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ target_ulong tlb_addr = tlb_addr_write(entry);
+ const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
+ unsigned a_bits = get_alignment_bits(get_memop(oi));
+ void *haddr;
+
+ /* Handle CPU specific unaligned behaviour */
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
+ /* If the TLB entry is for a different page, reload and try again. */
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+ addr & TARGET_PAGE_MASK)) {
+ tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
+ }
+
+ /* Handle an IO access. */
+ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+ CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+
+ if ((addr & (size - 1)) != 0) {
+ goto do_unaligned_access;
+ }
+
+ io_writex(env, iotlbentry, mmu_idx,
+ handle_bswap(val, size, big_endian),
+ addr, retaddr, tlb_addr & TLB_RECHECK, size);
+ return;
+ }
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (size > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+ >= TARGET_PAGE_SIZE)) {
+ int i;
+ uintptr_t index2;
+ CPUTLBEntry *entry2;
+ target_ulong page2, tlb_addr2;
+ do_unaligned_access:
+ /*
+ * Ensure the second page is in the TLB. Note that the first page
+ * is already guaranteed to be filled, and that the second page
+ * cannot evict the first.
+ */
+ page2 = (addr + size) & TARGET_PAGE_MASK;
+ index2 = tlb_index(env, mmu_idx, page2);
+ entry2 = tlb_entry(env, mmu_idx, page2);
+ tlb_addr2 = tlb_addr_write(entry2);
+ if (!tlb_hit_page(tlb_addr2, page2)
+ && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
+ page2 & TARGET_PAGE_MASK)) {
+ tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
+ /*
+ * XXX: not efficient, but simple.
+ * This loop must go in the forward direction to avoid issues
+ * with self-modifying code in Windows 64-bit.
+ */
+ for (i = 0; i < size; ++i) {
+ uint8_t val8;
+ if (big_endian) {
+ /* Big-endian extract. */
+ val8 = val >> (((size - 1) * 8) - (i * 8));
+ } else {
+ /* Little-endian extract. */
+ val8 = val >> (i * 8);
+ }
+ store_helper(env, addr + i, val8, oi, retaddr, 1, big_endian);
+ }
+ return;
+ }
+
+ haddr = (void *)((uintptr_t)addr + entry->addend);
+
+ switch (size) {
+ case 1:
+ stb_p(haddr, val);
+ break;
+ case 2:
+ if (big_endian) {
+ stw_be_p(haddr, val);
+ } else {
+ stw_le_p(haddr, val);
+ }
+ break;
+ case 4:
+ if (big_endian) {
+ stl_be_p(haddr, val);
+ } else {
+ stl_le_p(haddr, val);
+ }
+ break;
+ case 8:
+ if (big_endian) {
+ stq_be_p(haddr, val);
+ } else {
+ stq_le_p(haddr, val);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+}
+
+void __attribute__((flatten))
+helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 1, false);
+}
+
+void __attribute__((flatten))
+helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 2, false);
+}
+
+void __attribute__((flatten))
+helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 2, true);
+}
+
+void __attribute__((flatten))
+helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 4, false);
+}
+
+void __attribute__((flatten))
+helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 4, true);
+}
+
+void __attribute__((flatten))
+helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 8, false);
+}
+
+void __attribute__((flatten))
+helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ store_helper(env, addr, val, oi, retaddr, 8, true);
+}
/* First set of helpers allows passing in of OI and RETADDR. This makes
them callable from other helpers. */
@@ -1248,20 +1643,51 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Code access functions. */
-#undef MMUSUFFIX
-#define MMUSUFFIX _cmmu
-#undef GETPC
-#define GETPC() ((uintptr_t)0)
-#define SOFTMMU_CODE_ACCESS
+uint8_t __attribute__((flatten))
+helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 1, false, true);
+}
-#define DATA_SIZE 1
-#include "softmmu_template.h"
+uint16_t __attribute__((flatten))
+helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, false, true);
+}
-#define DATA_SIZE 2
-#include "softmmu_template.h"
+uint16_t __attribute__((flatten))
+helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, true, true);
+}
-#define DATA_SIZE 4
-#include "softmmu_template.h"
+uint32_t __attribute__((flatten))
+helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, false, true);
+}
-#define DATA_SIZE 8
-#include "softmmu_template.h"
+uint32_t __attribute__((flatten))
+helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, true, true);
+}
+
+uint64_t __attribute__((flatten))
+helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, false, true);
+}
+
+uint64_t __attribute__((flatten))
+helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 8, true, true);
+}
--
2.20.1