From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: thuth@redhat.com, qemu-s390x@nongnu.org,
Ilya Leoshkevich <iii@linux.ibm.com>
Subject: [PATCH v4 08/27] target/s390x: Introduce gen_psw_addr_disp
Date: Mon, 20 Feb 2023 08:40:33 -1000
Message-ID: <20230220184052.163465-9-richard.henderson@linaro.org>
In-Reply-To: <20230220184052.163465-1-richard.henderson@linaro.org>

In preparation for TARGET_TB_PCREL, reduce reliance on absolute values.
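
For reviewers unfamiliar with the end goal, here is a rough sketch of where
this helper could head once TARGET_TB_PCREL is enabled.  This is purely
illustrative and not part of this patch: "tb_pc_base" is a hypothetical
TCGv_i64 holding the run-time address of the current TB, and the series may
implement the PCREL case differently.

  static void gen_psw_addr_disp(DisasContext *s, TCGv_i64 dest, int64_t disp)
  {
  #ifdef TARGET_TB_PCREL
      /*
       * Hypothetical PCREL variant: compute the target as an offset from a
       * run-time TB base (tb_pc_base, assumed to be loaded at TB entry)
       * instead of folding it into an absolute constant, so the generated
       * code stays valid if the TB runs at a different virtual address.
       */
      tcg_gen_addi_i64(dest, tb_pc_base,
                       s->base.pc_next - s->base.pc_first + disp);
  #else
      /* Without PCREL this matches the helper added below. */
      tcg_gen_movi_i64(dest, s->base.pc_next + disp);
  #endif
  }
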
Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/s390x/tcg/translate.c | 69 ++++++++++++++++++++++++------------
1 file changed, 46 insertions(+), 23 deletions(-)
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index bd7f644f83..48e558a790 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -169,6 +169,11 @@ static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
+static void gen_psw_addr_disp(DisasContext *s, TCGv_i64 dest, int64_t disp)
+{
+ tcg_gen_movi_i64(dest, s->base.pc_next + disp);
+}
+
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
if (s->base.tb->flags & FLAG_MASK_32) {
@@ -346,18 +351,24 @@ static void return_low128(TCGv_i64 dest)
static void update_psw_addr(DisasContext *s)
{
- /* psw.addr */
- tcg_gen_movi_i64(psw_addr, s->base.pc_next);
+ gen_psw_addr_disp(s, psw_addr, 0);
}
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
- tcg_gen_movi_i64(gbea, s->base.pc_next);
+ gen_psw_addr_disp(s, gbea, 0);
if (s->base.tb->flags & FLAG_MASK_PER) {
- TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
- gen_helper_per_branch(cpu_env, gbea, next_pc);
+ if (to_next) {
+ TCGv_i64 next_pc = tcg_temp_new_i64();
+
+ gen_psw_addr_disp(s, next_pc, s->ilen);
+ gen_helper_per_branch(cpu_env, gbea, next_pc);
+ tcg_temp_free_i64(next_pc);
+ } else {
+ gen_helper_per_branch(cpu_env, gbea, psw_addr);
+ }
}
#endif
}
@@ -370,20 +381,23 @@ static void per_branch_cond(DisasContext *s, TCGCond cond,
TCGLabel *lab = gen_new_label();
tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
- tcg_gen_movi_i64(gbea, s->base.pc_next);
+ gen_psw_addr_disp(s, gbea, 0);
gen_helper_per_branch(cpu_env, gbea, psw_addr);
gen_set_label(lab);
} else {
- TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
+ TCGv_i64 pc = tcg_temp_new_i64();
+
+ gen_psw_addr_disp(s, pc, 0);
tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
+ tcg_temp_free_i64(pc);
}
#endif
}
static void per_breaking_event(DisasContext *s)
{
- tcg_gen_movi_i64(gbea, s->base.pc_next);
+ gen_psw_addr_disp(s, gbea, 0);
}
static void update_cc_op(DisasContext *s)
@@ -1163,21 +1177,19 @@ struct DisasInsn {
static DisasJumpType help_goto_direct(DisasContext *s, int64_t disp)
{
- uint64_t dest = s->base.pc_next + disp;
-
- if (dest == s->pc_tmp) {
+ if (disp == s->ilen) {
per_branch(s, true);
return DISAS_NEXT;
}
- if (use_goto_tb(s, dest)) {
+ if (use_goto_tb(s, s->base.pc_next + disp)) {
update_cc_op(s);
per_breaking_event(s);
tcg_gen_goto_tb(0);
- tcg_gen_movi_i64(psw_addr, dest);
+ gen_psw_addr_disp(s, psw_addr, disp);
tcg_gen_exit_tb(s->base.tb, 0);
return DISAS_NORETURN;
} else {
- tcg_gen_movi_i64(psw_addr, dest);
+ gen_psw_addr_disp(s, psw_addr, disp);
per_branch(s, false);
return DISAS_PC_UPDATED;
}
@@ -1235,14 +1247,14 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
/* Branch not taken. */
tcg_gen_goto_tb(0);
- tcg_gen_movi_i64(psw_addr, s->pc_tmp);
+ gen_psw_addr_disp(s, psw_addr, s->ilen);
tcg_gen_exit_tb(s->base.tb, 0);
/* Branch taken. */
gen_set_label(lab);
per_breaking_event(s);
tcg_gen_goto_tb(1);
- tcg_gen_movi_i64(psw_addr, dest);
+ gen_psw_addr_disp(s, psw_addr, disp);
tcg_gen_exit_tb(s->base.tb, 1);
ret = DISAS_NORETURN;
@@ -1265,12 +1277,12 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
/* Branch not taken. */
update_cc_op(s);
tcg_gen_goto_tb(0);
- tcg_gen_movi_i64(psw_addr, s->pc_tmp);
+ gen_psw_addr_disp(s, psw_addr, s->ilen);
tcg_gen_exit_tb(s->base.tb, 0);
gen_set_label(lab);
if (is_imm) {
- tcg_gen_movi_i64(psw_addr, dest);
+ gen_psw_addr_disp(s, psw_addr, disp);
}
per_breaking_event(s);
ret = DISAS_PC_UPDATED;
@@ -1280,9 +1292,12 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
Most commonly we're single-stepping or some other condition that
disables all use of goto_tb. Just update the PC and exit. */
- TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
+ TCGv_i64 next = tcg_temp_new_i64();
+
+ gen_psw_addr_disp(s, next, s->ilen);
if (is_imm) {
- cdest = tcg_constant_i64(dest);
+ cdest = tcg_temp_new_i64();
+ gen_psw_addr_disp(s, cdest, disp);
}
if (c->is_64) {
@@ -1301,6 +1316,10 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
tcg_temp_free_i64(t1);
}
+ tcg_temp_free_i64(next);
+ if (is_imm) {
+ tcg_temp_free_i64(cdest);
+ }
ret = DISAS_PC_UPDATED;
}
@@ -5891,7 +5910,8 @@ static void in2_a2(DisasContext *s, DisasOps *o)
static void in2_ri2(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
+ o->in2 = tcg_temp_new_i64();
+ gen_psw_addr_disp(s, o->in2, (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0
@@ -6376,8 +6396,11 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
#ifndef CONFIG_USER_ONLY
if (s->base.tb->flags & FLAG_MASK_PER) {
- TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
+ TCGv_i64 addr = tcg_temp_new_i64();
+
+ gen_psw_addr_disp(s, addr, 0);
gen_helper_per_ifetch(cpu_env, addr);
+ tcg_temp_free_i64(addr);
}
#endif
@@ -6506,7 +6529,7 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
if (s->base.tb->flags & FLAG_MASK_PER) {
/* An exception might be triggered, save PSW if not already done. */
if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
- tcg_gen_movi_i64(psw_addr, s->pc_tmp);
+ gen_psw_addr_disp(s, psw_addr, s->ilen);
}
/* Call the helper to check for a possible PER exception. */
--
2.34.1