From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: pbonzini@redhat.com, alex.bennee@linaro.org, stefanha@redhat.com,
david@redhat.com
Subject: [PATCH v4 05/16] cputlb: Split out load/store_memop
Date: Mon, 23 Sep 2019 15:59:53 -0700
Message-ID: <20190923230004.9231-6-richard.henderson@linaro.org>
In-Reply-To: <20190923230004.9231-1-richard.henderson@linaro.org>

We will shortly be using these more than once.
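
For example, the TLB_BSWAP patch later in this series wants to perform
the same dispatch with the endianness reversed.  A sketch of that kind
of reuse (illustrative only; the wrapper below is hypothetical and not
part of this patch):

    /* Hypothetical wrapper, shown only to illustrate reuse of
     * load_memop.  MO_BSWAP is the MemOp endianness bit, so XOR-ing
     * it selects the opposite byte order at the same access size.
     */
    static uint64_t load_memop_bswap(const void *haddr, MemOp op)
    {
        return load_memop(haddr, op ^ MO_BSWAP);
    }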
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 110 +++++++++++++++++++++++----------------------
 1 file changed, 57 insertions(+), 53 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e529af6d09..430ba4a69d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1281,6 +1281,29 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);
 
+static inline uint64_t QEMU_ALWAYS_INLINE
+load_memop(const void *haddr, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        return ldub_p(haddr);
+    case MO_BEUW:
+        return lduw_be_p(haddr);
+    case MO_LEUW:
+        return lduw_le_p(haddr);
+    case MO_BEUL:
+        return (uint32_t)ldl_be_p(haddr);
+    case MO_LEUL:
+        return (uint32_t)ldl_le_p(haddr);
+    case MO_BEQ:
+        return ldq_be_p(haddr);
+    case MO_LEQ:
+        return ldq_le_p(haddr);
+    default:
+        optimize_away();
+    }
+}
+
 static inline uint64_t QEMU_ALWAYS_INLINE
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
@@ -1373,33 +1396,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 
  do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        res = ldub_p(haddr);
-        break;
-    case MO_BEUW:
-        res = lduw_be_p(haddr);
-        break;
-    case MO_LEUW:
-        res = lduw_le_p(haddr);
-        break;
-    case MO_BEUL:
-        res = (uint32_t)ldl_be_p(haddr);
-        break;
-    case MO_LEUL:
-        res = (uint32_t)ldl_le_p(haddr);
-        break;
-    case MO_BEQ:
-        res = ldq_be_p(haddr);
-        break;
-    case MO_LEQ:
-        res = ldq_le_p(haddr);
-        break;
-    default:
-        optimize_away();
-    }
-
-    return res;
+    return load_memop(haddr, op);
 }
 
 /*
@@ -1415,7 +1412,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
+    return load_helper(env, addr, oi, retaddr, MO_UB, false,
+                       full_ldub_mmu);
 }
 
 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
@@ -1530,6 +1528,36 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
  * Store Helpers
  */
 
+static inline void QEMU_ALWAYS_INLINE
+store_memop(void *haddr, uint64_t val, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        stb_p(haddr, val);
+        break;
+    case MO_BEUW:
+        stw_be_p(haddr, val);
+        break;
+    case MO_LEUW:
+        stw_le_p(haddr, val);
+        break;
+    case MO_BEUL:
+        stl_be_p(haddr, val);
+        break;
+    case MO_LEUL:
+        stl_le_p(haddr, val);
+        break;
+    case MO_BEQ:
+        stq_be_p(haddr, val);
+        break;
+    case MO_LEQ:
+        stq_le_p(haddr, val);
+        break;
+    default:
+        optimize_away();
+    }
+}
+
 static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
@@ -1657,31 +1685,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
  do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        stb_p(haddr, val);
-        break;
-    case MO_BEUW:
-        stw_be_p(haddr, val);
-        break;
-    case MO_LEUW:
-        stw_le_p(haddr, val);
-        break;
-    case MO_BEUL:
-        stl_be_p(haddr, val);
-        break;
-    case MO_LEUL:
-        stl_le_p(haddr, val);
-        break;
-    case MO_BEQ:
-        stq_be_p(haddr, val);
-        break;
-    case MO_LEQ:
-        stq_le_p(haddr, val);
-        break;
-    default:
-        optimize_away();
-    }
+    store_memop(haddr, val, op);
 }
 
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
--
2.17.1