From: Alvise Rigo <a.rigo@virtualopensystems.com>
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: alex.bennee@linaro.org, jani.kokkonen@huawei.com,
tech@virtualopensystems.com, claudio.fontana@huawei.com,
pbonzini@redhat.com
Subject: [Qemu-devel] [RFC v5 6/6] target-arm: translate: Use ld/st excl for atomic insns
Date: Thu, 24 Sep 2015 10:32:46 +0200
Message-ID: <1443083566-10994-7-git-send-email-a.rigo@virtualopensystems.com>
In-Reply-To: <1443083566-10994-1-git-send-email-a.rigo@virtualopensystems.com>
Use the new LL/SC runtime helpers to handle the ARM atomic
instructions.

In general, the helper generators
gen_helper_{ldlink,stcond}_aa32_i{8,16,32,64}() end up calling the
functions helper_{le,be}_{ldlink,stcond}{ub,uw,ul,q}_mmu() implemented
in softmmu_llsc_template.h.
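
For instance, for the 32-bit exclusive load the generated call is
expected to land in a wrapper along these lines (a sketch only: the
real definitions are introduced by patches 3/6 and 4/6 of this series;
the make_memop_idx()/MO_LEUL/GETRA() body below is an assumption for
illustration and shows the little-endian case):

    uint32_t HELPER(ldlink_aa32_i32)(CPUARMState *env, uint32_t addr,
                                     uint32_t index)
    {
        /* Build the memop+mmu_idx descriptor for a 32-bit LE load,
         * then forward to the LL/SC slow path implemented in
         * softmmu_llsc_template.h, passing the return address so a
         * possible TLB refill can resume correctly. */
        TCGMemOpIdx op = make_memop_idx(MO_LEUL, index);

        return helper_le_ldlinkul_mmu(env, addr, op, GETRA());
    }
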
Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
target-arm/translate.c | 121 +++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 117 insertions(+), 4 deletions(-)
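
As a reminder for reviewers, the guest-side pattern whose atomicity
this series must preserve is the usual exclusive retry loop; a plain C
illustration with GCC inline assembly for an ARMv7 guest (not part of
the patch, the function name is made up):

    static void guest_atomic_inc(int *addr)
    {
        int tmp, failed;

        __asm__ __volatile__(
            "1: ldrex   %0, [%2]\n"     /* exclusive load (LoadLink)      */
            "   add     %0, %0, #1\n"
            "   strex   %1, %0, [%2]\n" /* exclusive store, 0 on success  */
            "   cmp     %1, #0\n"
            "   bne     1b\n"           /* retry until the store succeeds */
            : "=&r" (tmp), "=&r" (failed)
            : "r" (addr)
            : "cc", "memory");
    }

With this patch, translating LDREX sets the cpu_ll_sc_context flag and
translating STREX calls the store-conditional helper only from within
such a context, otherwise failing with Rd = 1.
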
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 69ac18c..fa85455 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -65,8 +65,12 @@ TCGv_ptr cpu_env;
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
+#ifndef CONFIG_TCG_USE_LDST_EXCL
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
+#else
+static TCGv_i32 cpu_ll_sc_context;
+#endif
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
@@ -99,10 +103,15 @@ void arm_translate_init(void)
cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
+#ifdef CONFIG_TCG_USE_LDST_EXCL
+ cpu_ll_sc_context = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUARMState, ll_sc_context), "ll_sc_context");
+#else
cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_val), "exclusive_val");
+#endif
#ifdef CONFIG_USER_ONLY
cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_test), "exclusive_test");
@@ -7382,15 +7391,62 @@ static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
tcg_gen_or_i32(cpu_ZF, lo, hi);
}
-/* Load/Store exclusive instructions are implemented by remembering
+/* If the softmmu is enabled, the translation of Load/Store exclusive
+   instructions relies on the gen_helper_{ldlink,stcond} helpers,
+   offloading most of the work to the softmmu_llsc_template.h functions.
+
+   Otherwise, these instructions are implemented by remembering
the value/address loaded, and seeing if these are the same
when the store is performed. This should be sufficient to implement
the architecturally mandated semantics, and avoids having to monitor
regular stores.
- In system emulation mode only one CPU will be running at once, so
- this sequence is effectively atomic. In user emulation mode we
- throw an exception and handle the atomic operation elsewhere. */
+ In user emulation mode we throw an exception and handle the atomic
+ operation elsewhere. */
+#ifdef CONFIG_TCG_USE_LDST_EXCL
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
+                               TCGv_i32 addr, int size)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_i32 mem_idx = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(mem_idx, get_mem_index(s));
+
+ if (size != 3) {
+ switch (size) {
+ case 0:
+ gen_helper_ldlink_aa32_i8(tmp, cpu_env, addr, mem_idx);
+ break;
+ case 1:
+ gen_helper_ldlink_aa32_i16(tmp, cpu_env, addr, mem_idx);
+ break;
+ case 2:
+ gen_helper_ldlink_aa32_i32(tmp, cpu_env, addr, mem_idx);
+ break;
+ default:
+ abort();
+ }
+
+ store_reg(s, rt, tmp);
+ } else {
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+ TCGv_i32 tmph = tcg_temp_new_i32();
+
+ gen_helper_ldlink_aa32_i64(tmp64, cpu_env, addr, mem_idx);
+ tcg_gen_extr_i64_i32(tmp, tmph, tmp64);
+
+ store_reg(s, rt, tmp);
+ store_reg(s, rt2, tmph);
+
+ tcg_temp_free_i64(tmp64);
+ }
+
+ tcg_temp_free_i32(mem_idx);
+
+ /* From now on we are in LL/SC context. */
+ tcg_gen_movi_i32(cpu_ll_sc_context, 1);
+}
+#else
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 addr, int size)
{
@@ -7429,10 +7485,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
store_reg(s, rt, tmp);
tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
+#endif
static void gen_clrex(DisasContext *s)
{
+#ifndef CONFIG_TCG_USE_LDST_EXCL
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+#endif
}
#ifdef CONFIG_USER_ONLY
@@ -7444,6 +7504,59 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
size | (rd << 4) | (rt << 8) | (rt2 << 12));
gen_exception_internal_insn(s, 4, EXCP_STREX);
}
+#elif defined CONFIG_TCG_USE_LDST_EXCL
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv_i32 addr, int size)
+{
+ TCGv_i32 tmp, mem_idx;
+ TCGLabel *done_label, *fail_label;
+
+ fail_label = gen_new_label();
+ done_label = gen_new_label();
+ mem_idx = tcg_temp_new_i32();
+
+ /* Fail if we are not in LL/SC context. */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ll_sc_context, 1, fail_label);
+
+ tcg_gen_movi_i32(mem_idx, get_mem_index(s));
+ tmp = load_reg(s, rt);
+
+ if (size != 3) {
+ switch (size) {
+ case 0:
+ gen_helper_stcond_aa32_i8(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
+ break;
+ case 1:
+ gen_helper_stcond_aa32_i16(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
+ break;
+ case 2:
+ gen_helper_stcond_aa32_i32(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
+ break;
+ default:
+ abort();
+ }
+ } else {
+ TCGv_i64 tmp64;
+ TCGv_i32 tmp2;
+
+ tmp64 = tcg_temp_new_i64();
+ tmp2 = load_reg(s, rt2);
+ tcg_gen_concat_i32_i64(tmp64, tmp, tmp2);
+ gen_helper_stcond_aa32_i64(cpu_R[rd], cpu_env, addr, tmp64, mem_idx);
+
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i64(tmp64);
+ }
+ tcg_gen_br(done_label);
+
+ gen_set_label(fail_label);
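+    /* We get here only when not in a LL/SC context: fail the store
+       without calling the helper. */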
+ tcg_gen_movi_i32(cpu_ll_sc_context, 0);
+ tcg_gen_movi_i32(cpu_R[rd], 1);
+ gen_set_label(done_label);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(mem_idx);
+}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
--
2.5.3