From: "Alex Bennée" <alex.bennee@linaro.org>
To: Alvise Rigo <a.rigo@virtualopensystems.com>
Cc: mttcg@listserver.greensocs.com, claudio.fontana@huawei.com,
qemu-devel@nongnu.org, pbonzini@redhat.com,
jani.kokkonen@huawei.com, tech@virtualopensystems.com,
rth@twiddle.net
Subject: Re: [Qemu-devel] [RFC v6 07/14] target-arm: translate: Use ld/st excl for atomic insns
Date: Wed, 06 Jan 2016 17:11:02 +0000
Message-ID: <878u42d461.fsf@linaro.org>
In-Reply-To: <1450082498-27109-8-git-send-email-a.rigo@virtualopensystems.com>
Alvise Rigo <a.rigo@virtualopensystems.com> writes:
> Use the new LL/SC runtime helpers to handle the ARM atomic
> instructions in softmmu_llsc_template.h.
>
> In general, the helper generator
> gen_helper_{ldlink,stcond}_aa32_i{8,16,32,64}() calls the function
> helper_{le,be}_{ldlink,stcond}{ub,uw,ulq}_mmu() implemented in
> softmmu_llsc_template.h.
>
> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
> target-arm/translate.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 97 insertions(+), 4 deletions(-)
>
> diff --git a/target-arm/translate.c b/target-arm/translate.c
> index 5d22879..e88d8a3 100644
> --- a/target-arm/translate.c
> +++ b/target-arm/translate.c
> @@ -64,8 +64,10 @@ TCGv_ptr cpu_env;
> static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
> static TCGv_i32 cpu_R[16];
> TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
> +#ifndef CONFIG_TCG_USE_LDST_EXCL
> TCGv_i64 cpu_exclusive_addr;
> TCGv_i64 cpu_exclusive_val;
> +#endif
> #ifdef CONFIG_USER_ONLY
> TCGv_i64 cpu_exclusive_test;
> TCGv_i32 cpu_exclusive_info;
> @@ -98,10 +100,12 @@ void arm_translate_init(void)
> cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
> cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
>
> +#ifndef CONFIG_TCG_USE_LDST_EXCL
> cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
> offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
> cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
> offsetof(CPUARMState, exclusive_val), "exclusive_val");
> +#endif
> #ifdef CONFIG_USER_ONLY
> cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
> offsetof(CPUARMState, exclusive_test), "exclusive_test");
> @@ -7414,15 +7418,59 @@ static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
> tcg_gen_or_i32(cpu_ZF, lo, hi);
> }
>
> -/* Load/Store exclusive instructions are implemented by remembering
> +/* If the softmmu is enabled, the translation of Load/Store exclusive
> + * instructions will rely on the gen_helper_{ldlink,stcond} helpers,
> + * offloading most of the work to the softmmu_llsc_template.h functions.
> +
> + Otherwise, these instructions are implemented by remembering
> the value/address loaded, and seeing if these are the same
> when the store is performed. This should be sufficient to implement
> the architecturally mandated semantics, and avoids having to monitor
> regular stores.
>
> - In system emulation mode only one CPU will be running at once, so
> - this sequence is effectively atomic. In user emulation mode we
> - throw an exception and handle the atomic operation elsewhere. */
> + In user emulation mode we throw an exception and handle the atomic
> + operation elsewhere. */
> +#ifdef CONFIG_TCG_USE_LDST_EXCL
> +static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
> + TCGv_i32 addr, int size)
> + {
> + TCGv_i32 tmp = tcg_temp_new_i32();
> + TCGv_i32 mem_idx = tcg_temp_new_i32();
> +
> + tcg_gen_movi_i32(mem_idx, get_mem_index(s));
> +
> + if (size != 3) {
> + switch (size) {
> + case 0:
> + gen_helper_ldlink_aa32_i8(tmp, cpu_env, addr, mem_idx);
> + break;
> + case 1:
> + gen_helper_ldlink_aa32_i16(tmp, cpu_env, addr, mem_idx);
> + break;
> + case 2:
> + gen_helper_ldlink_aa32_i32(tmp, cpu_env, addr, mem_idx);
> + break;
> + default:
> + abort();
> + }
> +
> + store_reg(s, rt, tmp);
> + } else {
> + TCGv_i64 tmp64 = tcg_temp_new_i64();
> + TCGv_i32 tmph = tcg_temp_new_i32();
> +
> + gen_helper_ldlink_aa32_i64(tmp64, cpu_env, addr, mem_idx);
> + tcg_gen_extr_i64_i32(tmp, tmph, tmp64);
> +
> + store_reg(s, rt, tmp);
> + store_reg(s, rt2, tmph);
> +
> + tcg_temp_free_i64(tmp64);
> + }
> +
> + tcg_temp_free_i32(mem_idx);
> +}
> +#else
> static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
> TCGv_i32 addr, int size)
> {
> @@ -7461,10 +7509,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
> store_reg(s, rt, tmp);
> tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
> }
> +#endif
>
> static void gen_clrex(DisasContext *s)
> {
> +#ifdef CONFIG_TCG_USE_LDST_EXCL
I don't think it would be correct to turn clrex into a no-op in softmmu
mode. If the guest has done an exclusive load and then executes clrex,
leaving the exclusive state in place means we may well keep forcing
slow-path transitions for no reason.
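
Something along these lines would keep CLREX effective on the softmmu
path. This is only a rough sketch: I'm assuming a helper along the lines
of the atomic_clear one you add later in the series, so the name and
signature below are illustrative rather than the final API:

static void gen_clrex(DisasContext *s)
{
#ifdef CONFIG_TCG_USE_LDST_EXCL
    /* Illustrative only: clear the CPU's exclusive/LL-SC state so
     * subsequent accesses don't keep taking the slow path. */
    gen_helper_atomic_clear(cpu_env);
#else
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
#endif
}
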
> +#else
> tcg_gen_movi_i64(cpu_exclusive_addr, -1);
> +#endif
> }
>
> #ifdef CONFIG_USER_ONLY
> @@ -7476,6 +7528,47 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
> size | (rd << 4) | (rt << 8) | (rt2 << 12));
> gen_exception_internal_insn(s, 4, EXCP_STREX);
> }
> +#elif defined CONFIG_TCG_USE_LDST_EXCL
> +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
> + TCGv_i32 addr, int size)
> +{
> + TCGv_i32 tmp, mem_idx;
> +
> + mem_idx = tcg_temp_new_i32();
> +
> + tcg_gen_movi_i32(mem_idx, get_mem_index(s));
> + tmp = load_reg(s, rt);
> +
> + if (size != 3) {
> + switch (size) {
> + case 0:
> + gen_helper_stcond_aa32_i8(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
> + break;
> + case 1:
> + gen_helper_stcond_aa32_i16(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
> + break;
> + case 2:
> + gen_helper_stcond_aa32_i32(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
> + break;
> + default:
> + abort();
> + }
> + } else {
> + TCGv_i64 tmp64;
> + TCGv_i32 tmp2;
> +
> + tmp64 = tcg_temp_new_i64();
> + tmp2 = load_reg(s, rt2);
> + tcg_gen_concat_i32_i64(tmp64, tmp, tmp2);
> + gen_helper_stcond_aa32_i64(cpu_R[rd], cpu_env, addr, tmp64, mem_idx);
> +
> + tcg_temp_free_i32(tmp2);
> + tcg_temp_free_i64(tmp64);
> + }
> +
> + tcg_temp_free_i32(tmp);
> + tcg_temp_free_i32(mem_idx);
> +}
> #else
> static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
> TCGv_i32 addr, int size)
--
Alex Bennée