From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-ppc@nongnu.org, david@gibson.dropbear.id.au
Subject: [Qemu-devel] [PATCH 04/13] target/ppc: Use atomic cmpxchg for STQCX
Date: Tue, 26 Jun 2018 09:19:12 -0700
Message-ID: <20180626161921.27941-5-richard.henderson@linaro.org>
In-Reply-To: <20180626161921.27941-1-richard.henderson@linaro.org>
When running in a parallel context, we must use a helper in order
to perform the 128-bit atomic operation. When running in a serial
context, do the compare before the store.
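For illustration only (not part of the patch, and not QEMU API), the
parallel path amounts to the following self-contained sketch: a 128-bit
store-conditional emulated as a compare-and-swap against the value that
was captured when the reservation was taken, with the reservation
consumed whether or not the store succeeds.  The names (struct
reservation, lq_and_reserve, stq_conditional) are invented for this
sketch, and it assumes a compiler providing 16-byte __atomic builtins
(e.g. "gcc -mcx16 sketch.c -latomic" on x86-64):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned __int128 u128;

struct reservation {
    u128 *addr;   /* address covered by the reservation (lqarx) */
    u128 val;     /* value observed when the reservation was taken */
};

/* lqarx: load the quadword and remember both the address and the value. */
static u128 lq_and_reserve(struct reservation *r, u128 *addr)
{
    r->addr = addr;
    r->val = __atomic_load_n(addr, __ATOMIC_ACQUIRE);
    return r->val;
}

/* stqcx.: succeed only if the reservation still matches memory. */
static bool stq_conditional(struct reservation *r, u128 *addr, u128 newv)
{
    bool ok = false;
    if (addr == r->addr) {
        u128 expected = r->val;
        ok = __atomic_compare_exchange_n(addr, &expected, newv, false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }
    r->addr = NULL;   /* the reservation is always consumed */
    return ok;
}

int main(void)
{
    static u128 mem;
    struct reservation r;

    lq_and_reserve(&r, &mem);
    printf("stqcx. %s\n", stq_conditional(&r, &mem, 42) ? "succeeded" : "failed");
    return 0;
}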
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/ppc/helper.h | 2 +
target/ppc/mem_helper.c | 38 +++++++++++++++++
target/ppc/translate.c | 95 ++++++++++++++++++++++++++---------------
3 files changed, 101 insertions(+), 34 deletions(-)
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index cbc1228570..5706c2497f 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -807,4 +807,6 @@ DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
void, env, tl, i64, i64, i32)
DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG,
void, env, tl, i64, i64, i32)
+DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32)
+DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32)
#endif
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index 57e301edc3..8f0d86d104 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -245,6 +245,44 @@ void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
Int128 val = int128_make128(lo, hi);
helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}
+
+uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t new_lo, uint64_t new_hi,
+ uint32_t opidx)
+{
+ bool success = false;
+
+ if (likely(addr == env->reserve_addr)) {
+ Int128 oldv, cmpv, newv;
+
+ cmpv = int128_make128(env->reserve_val2, env->reserve_val);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
+ success = int128_eq(oldv, cmpv);
+ }
+ env->reserve_addr = -1;
+    return env->so + success * CRF_EQ;
+}
+
+uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t new_lo, uint64_t new_hi,
+ uint32_t opidx)
+{
+ bool success = false;
+
+ if (likely(addr == env->reserve_addr)) {
+ Int128 oldv, cmpv, newv;
+
+ cmpv = int128_make128(env->reserve_val2, env->reserve_val);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
+ success = int128_eq(oldv, cmpv);
+ }
+ env->reserve_addr = -1;
+    return env->so + success * CRF_EQ;
+}
#endif
/*****************************************************************************/
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 3d63a62269..c7b9d226eb 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3332,50 +3332,77 @@ static void gen_lqarx(DisasContext *ctx)
/* stqcx. */
static void gen_stqcx_(DisasContext *ctx)
{
- TCGv EA;
- int reg = rS(ctx->opcode);
- int len = 16;
-#if !defined(CONFIG_USER_ONLY)
- TCGLabel *l1;
- TCGv gpr1, gpr2;
-#endif
+ int rs = rS(ctx->opcode);
+ TCGv EA, hi, lo;
- if (unlikely((rD(ctx->opcode) & 1))) {
+ if (unlikely(rs & 1)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
+
gen_set_access_type(ctx, ACCESS_RES);
- EA = tcg_temp_local_new();
+ EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- if (len > 1) {
- gen_check_align(ctx, EA, (len) - 1);
- }
-#if defined(CONFIG_USER_ONLY)
- gen_conditional_store(ctx, EA, reg, 16);
+ /* Note that the low part is always in RS+1, even in LE mode. */
+ lo = cpu_gpr[rs + 1];
+ hi = cpu_gpr[rs];
+
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
+#ifdef CONFIG_ATOMIC128
+        if (ctx->le_mode) {
+            gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
+        } else {
+            gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
+        }
#else
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
- l1 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
- tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
-
- if (unlikely(ctx->le_mode)) {
- gpr1 = cpu_gpr[reg + 1];
- gpr2 = cpu_gpr[reg];
- } else {
- gpr1 = cpu_gpr[reg];
- gpr2 = cpu_gpr[reg + 1];
- }
- tcg_gen_qemu_st_tl(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_st_tl(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
-
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_reserve, -1);
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
#endif
- tcg_temp_free(EA);
-}
+        tcg_temp_free(EA);
+        tcg_temp_free_i32(oi);
+ } else {
+ TCGLabel *lab_fail = gen_new_label();
+ TCGLabel *lab_over = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
+ tcg_temp_free(EA);
+
+ gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
+ tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
+ ? offsetof(CPUPPCState, reserve_val2)
+ : offsetof(CPUPPCState, reserve_val)));
+ tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+
+ tcg_gen_addi_i64(t0, cpu_reserve, 8);
+ gen_qemu_ld64_i64(ctx, t0, t0);
+ tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
+ ? offsetof(CPUPPCState, reserve_val)
+ : offsetof(CPUPPCState, reserve_val2)));
+ tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+
+ /* Success */
+ gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
+ tcg_gen_addi_i64(t0, cpu_reserve, 8);
+ gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);
+
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
+ tcg_gen_br(lab_over);
+
+ gen_set_label(lab_fail);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+
+ gen_set_label(lab_over);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
#endif /* defined(TARGET_PPC64) */
/* sync */
--
2.17.1