From: Alvise Rigo <a.rigo@virtualopensystems.com>
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: jani.kokkonen@huawei.com, claudio.fontana@huawei.com,
tech@virtualopensystems.com, alex.bennee@linaro.org,
pbonzini@redhat.com, rth@twiddle.net, serge.fdrv@gmail.com,
Alvise Rigo <a.rigo@virtualopensystems.com>,
Peter Crosthwaite <crosthwaite.peter@gmail.com>
Subject: [Qemu-devel] [RFC v8 04/14] softmmu: Simplify helper_*_st_name, wrap RAM code
Date: Tue, 19 Apr 2016 15:39:21 +0200 [thread overview]
Message-ID: <1461073171-22953-5-git-send-email-a.rigo@virtualopensystems.com> (raw)
In-Reply-To: <1461073171-22953-1-git-send-email-a.rigo@virtualopensystems.com>
Attempting to simplify the helper_*_st_name functions, wrap the code that
handles a RAM access into an inline function. The function covers both the
big- and little-endian cases, selected by a runtime flag, and is called once
from each of the two store helpers (LE and BE), so it gets expanded twice per
access size.
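
Outside of the softmmu templates the same idea can be shown with a small,
self-contained C sketch (illustrative only: do_ram_store/helper_*_st below
are plain functions standing in for the template macros, not the QEMU code):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* One inline helper covers both byte orders, chosen at run time. */
    static inline void do_ram_store(uint8_t *haddr, uint32_t val,
                                    bool little_endian)
    {
        for (int i = 0; i < 4; i++) {
            int shift = little_endian ? 8 * i : 8 * (3 - i);
            haddr[i] = (val >> shift) & 0xff;
        }
    }

    /* The endian-specific store helpers only differ in the flag they pass. */
    static void helper_le_st(uint8_t *mem, uint32_t val)
    {
        do_ram_store(mem, val, true);
    }

    static void helper_be_st(uint8_t *mem, uint32_t val)
    {
        do_ram_store(mem, val, false);
    }

    int main(void)
    {
        uint8_t mem[4];
        helper_le_st(mem, 0x11223344);
        printf("LE: %02x %02x %02x %02x\n", mem[0], mem[1], mem[2], mem[3]);
        helper_be_st(mem, 0x11223344);
        printf("BE: %02x %02x %02x %02x\n", mem[0], mem[1], mem[2], mem[3]);
        return 0;
    }
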
Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
CC: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
softmmu_template.h | 80 +++++++++++++++++++++++++++---------------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/softmmu_template.h b/softmmu_template.h
index 9185486..ea6a0fb 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -433,13 +433,48 @@ static inline void smmu_helper(do_mmio_store)(CPUArchState *env,
glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
}
+static inline void smmu_helper(do_ram_store)(CPUArchState *env,
+ bool little_endian, DATA_TYPE val,
+ target_ulong addr, TCGMemOpIdx oi,
+ unsigned mmu_idx, int index,
+ uintptr_t retaddr)
+{
+ uintptr_t haddr;
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (DATA_SIZE > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+ >= TARGET_PAGE_SIZE)) {
+ smmu_helper(do_unl_store)(env, little_endian, val, addr, oi, mmu_idx,
+ retaddr);
+ return;
+ }
+
+ /* Handle aligned access or unaligned access in the same page. */
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+ glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+#else
+ if (little_endian) {
+ glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
+ } else {
+ glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+ }
+#endif
+}
+
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -465,27 +500,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
return;
}
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- smmu_helper(do_unl_store)(env, true, val, addr, oi, mmu_idx, retaddr);
- return;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
-#if DATA_SIZE == 1
- glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-#else
- glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-#endif
+ smmu_helper(do_ram_store)(env, true, val, addr, oi, mmu_idx, index,
+ retaddr);
}
#if DATA_SIZE > 1
@@ -495,7 +511,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -521,23 +536,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
return;
}
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- smmu_helper(do_unl_store)(env, false, val, addr, oi, mmu_idx, retaddr);
- return;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+ smmu_helper(do_ram_store)(env, false, val, addr, oi, mmu_idx, index,
+ retaddr);
}
#endif /* DATA_SIZE > 1 */
--
2.8.0