* [PULL 01/75] accel/tcg: Name gen_goto_tb()'s TB slot index as @tb_slot_idx
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
tcg_gen_goto_tb() takes an unsigned index selecting the TB slot (0 or 1).
Declare the corresponding gen_goto_tb() argument as unsigned and rename
it to @tb_slot_idx (which is more descriptive than @n) on all targets.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Message-Id: <20251010031745.37528-1-philmd@linaro.org>
---
target/alpha/translate.c | 8 ++++----
target/arm/tcg/translate-a64.c | 8 ++++----
target/arm/tcg/translate.c | 8 ++++----
target/avr/translate.c | 7 ++++---
target/hexagon/translate.c | 8 ++++----
target/loongarch/tcg/translate.c | 7 ++++---
target/microblaze/translate.c | 7 ++++---
target/mips/tcg/translate.c | 7 ++++---
target/ppc/translate.c | 7 ++++---
target/riscv/translate.c | 9 +++++----
target/rx/translate.c | 7 ++++---
target/sh4/translate.c | 7 ++++---
target/sparc/translate.c | 6 +++---
target/tricore/translate.c | 7 ++++---
14 files changed, 56 insertions(+), 47 deletions(-)
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index f11b382438c..a492520075e 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -436,18 +436,18 @@ static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
return DISAS_NEXT;
}
-static void gen_goto_tb(DisasContext *ctx, int idx, int32_t disp)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, int32_t disp)
{
if (translator_use_goto_tb(&ctx->base, ctx->base.pc_next + disp)) {
/* With PCREL, PC must always be up-to-date. */
if (ctx->pcrel) {
gen_pc_disp(ctx, cpu_pc, disp);
- tcg_gen_goto_tb(idx);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(idx);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_pc_disp(ctx, cpu_pc, disp);
}
- tcg_gen_exit_tb(ctx->base.tb, idx);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
gen_pc_disp(ctx, cpu_pc, disp);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 918d5ed1120..3292d7cbfd9 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -528,7 +528,7 @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
return translator_use_goto_tb(&s->base, dest);
}
-static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
+static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx, int64_t diff)
{
if (use_goto_tb(s, s->pc_curr + diff)) {
/*
@@ -541,12 +541,12 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
*/
if (tb_cflags(s->base.tb) & CF_PCREL) {
gen_a64_update_pc(s, diff);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_a64_update_pc(s, diff);
}
- tcg_gen_exit_tb(s->base.tb, n);
+ tcg_gen_exit_tb(s->base.tb, tb_slot_idx);
s->base.is_jmp = DISAS_NORETURN;
} else {
gen_a64_update_pc(s, diff);
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index 3df0bbcb7f8..5f64fed2202 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -1310,7 +1310,7 @@ static void gen_goto_ptr(void)
* cpu_loop_exec. Any live exit_requests will be processed as we
* enter the next TB.
*/
-static void gen_goto_tb(DisasContext *s, int n, target_long diff)
+static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx, target_long diff)
{
if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) {
/*
@@ -1323,12 +1323,12 @@ static void gen_goto_tb(DisasContext *s, int n, target_long diff)
*/
if (tb_cflags(s->base.tb) & CF_PCREL) {
gen_update_pc(s, diff);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_update_pc(s, diff);
}
- tcg_gen_exit_tb(s->base.tb, n);
+ tcg_gen_exit_tb(s->base.tb, tb_slot_idx);
} else {
gen_update_pc(s, diff);
gen_goto_ptr();
diff --git a/target/avr/translate.c b/target/avr/translate.c
index 804b0b21dbd..ef6f655a458 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -981,14 +981,15 @@ static void gen_pop_ret(DisasContext *ctx, TCGv ret)
}
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
const TranslationBlock *tb = ctx->base.tb;
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(tb, n);
+ tcg_gen_exit_tb(tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index 02fd40c160f..50766eafe27 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -133,15 +133,15 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
return translator_use_goto_tb(&ctx->base, dest);
}
-static void gen_goto_tb(DisasContext *ctx, int idx, target_ulong dest, bool
- move_to_pc)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest, bool move_to_pc)
{
if (use_goto_tb(ctx, dest)) {
- tcg_gen_goto_tb(idx);
+ tcg_gen_goto_tb(tb_slot_idx);
if (move_to_pc) {
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest);
}
- tcg_gen_exit_tb(ctx->base.tb, idx);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
if (move_to_pc) {
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest);
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index 53a0b4c3ce9..57c3b8c4bb2 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -99,16 +99,17 @@ void generate_exception(DisasContext *ctx, int excp)
ctx->base.is_jmp = DISAS_NORETURN;
}
-static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (ctx->va32) {
dest = (uint32_t) dest;
}
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_tl(cpu_pc, dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 5098a1db4dc..3dd74b021e8 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -116,12 +116,13 @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
-static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (translator_use_goto_tb(&dc->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(dc->base.tb, n);
+ tcg_gen_exit_tb(dc->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index d91d6efe02c..54849e9ff1a 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -4362,12 +4362,13 @@ static void gen_trap(DisasContext *ctx, uint32_t opc,
}
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_save_pc(dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
gen_save_pc(dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index d422789a1d0..17e6d07c8c2 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3668,16 +3668,17 @@ static void gen_lookup_and_goto_ptr(DisasContext *ctx)
}
/*** Branch ***/
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (NARROW_MODE(ctx)) {
dest = (uint32_t) dest;
}
if (use_goto_tb(ctx, dest)) {
pmu_count_insns(ctx);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_tl(cpu_nip, dest & ~3);
gen_lookup_and_goto_ptr(ctx);
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 6fc06c71f51..9a53aecbfe9 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -286,7 +286,8 @@ static void exit_tb(DisasContext *ctx)
tcg_gen_exit_tb(NULL, 0);
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_long diff)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_long diff)
{
target_ulong dest = ctx->base.pc_next + diff;
@@ -305,12 +306,12 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_long diff)
*/
if (tb_cflags(ctx->base.tb) & CF_PCREL) {
gen_update_pc(ctx, diff);
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
} else {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
gen_update_pc(ctx, diff);
}
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
gen_update_pc(ctx, diff);
lookup_and_goto_ptr(ctx);
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 19a9584a829..5fc589c706b 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -147,12 +147,13 @@ void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
-static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (translator_use_goto_tb(&dc->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(dc->base.tb, n);
+ tcg_gen_exit_tb(dc->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_lookup_and_goto_ptr();
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 70fd13aa3f5..137b75a4fb2 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -231,12 +231,13 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
return translator_use_goto_tb(&ctx->base, dest);
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
+ target_ulong dest)
{
if (use_goto_tb(ctx, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (use_exit_tb(ctx)) {
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 810e2491a61..d6b599b71fe 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -363,15 +363,15 @@ static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
translator_use_goto_tb(&s->base, npc);
}
-static void gen_goto_tb(DisasContext *s, int tb_num,
+static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx,
target_ulong pc, target_ulong npc)
{
if (use_goto_tb(s, pc, npc)) {
/* jump to same page: we can use a direct jump */
- tcg_gen_goto_tb(tb_num);
+ tcg_gen_goto_tb(tb_slot_idx);
tcg_gen_movi_tl(cpu_pc, pc);
tcg_gen_movi_tl(cpu_npc, npc);
- tcg_gen_exit_tb(s->base.tb, tb_num);
+ tcg_gen_exit_tb(s->base.tb, tb_slot_idx);
} else {
/* jump to another page: we can use an indirect jump */
tcg_gen_movi_tl(cpu_pc, pc);
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 3d0e7a10bd8..7c6e3095971 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -2816,12 +2816,13 @@ static inline void gen_save_pc(target_ulong pc)
tcg_gen_movi_tl(cpu_PC, pc);
}
-static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_index,
+ target_ulong dest)
{
if (translator_use_goto_tb(&ctx->base, dest)) {
- tcg_gen_goto_tb(n);
+ tcg_gen_goto_tb(tb_slot_index);
gen_save_pc(dest);
- tcg_gen_exit_tb(ctx->base.tb, n);
+ tcg_gen_exit_tb(ctx->base.tb, tb_slot_index);
} else {
gen_save_pc(dest);
tcg_gen_lookup_and_goto_ptr();
--
2.51.0
* [PULL 02/75] target/alpha: Access CPUState::cpu_index via helper
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
CPUState::cpu_index is a target-agnostic field, meant
for common code (i.e. the accel/ and system/ folders).
Target-specific code should use the CPUClass::get_arch_id()
helper, even if there is a 1:1 mapping.
In preparation for generic changes around CPU indexing,
introduce the whami helper to access the generic
CPUState::cpu_index field.
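For reference (not part of this patch), the generic accessor mentioned
above would be used roughly as sketched here; the function name is
hypothetical, and for Alpha the mapping is 1:1 with cpu_index, which is
what the new helper returns:

  static uint64_t whami_via_arch_id(CPUAlphaState *env)
  {
      CPUState *cs = env_cpu(env);

      /* CPUClass::get_arch_id() defaults to returning cpu_index */
      return CPU_GET_CLASS(cs)->get_arch_id(cs);
  }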
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20250925010438.59755-1-philmd@linaro.org>
---
target/alpha/helper.h | 1 +
target/alpha/sys_helper.c | 5 +++++
target/alpha/translate.c | 3 +--
3 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/target/alpha/helper.h b/target/alpha/helper.h
index 788d2fbf289..954a5c8294c 100644
--- a/target/alpha/helper.h
+++ b/target/alpha/helper.h
@@ -92,6 +92,7 @@ DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_1(halt, void, i64)
+DEF_HELPER_1(whami, i64, env)
DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64)
DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64)
diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c
index 87e37605c13..0e0a619975b 100644
--- a/target/alpha/sys_helper.c
+++ b/target/alpha/sys_helper.c
@@ -67,3 +67,8 @@ void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
timer_del(cpu->alarm_timer);
}
}
+
+uint64_t HELPER(whami)(CPUAlphaState *env)
+{
+ return env_cpu(env)->cpu_index;
+}
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index a492520075e..b1d8a4eb80a 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -1126,8 +1126,7 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x3C:
/* WHAMI */
- tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
- -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
+ gen_helper_whami(ctx->ir[IR_V0], tcg_env);
break;
case 0x3E:
--
2.51.0
* [PULL 03/75] target/alpha: Replace VMSTATE_UINTTL() -> VMSTATE_UINT64()
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
All these CPUAlphaState fields are of uint64_t type (except
the @fir[] array, which uses float64, itself a typedef of the
same 64-bit type). Use the appropriate VMSTATE_UINT64() macro.
There is no functional change (the migration stream is not
modified), because the Alpha targets are only built as 64-bit:
$ git grep TARGET_LONG_BITS configs/targets/alpha*
configs/targets/alpha-linux-user.mak:4:TARGET_LONG_BITS=64
configs/targets/alpha-softmmu.mak:2:TARGET_LONG_BITS=64
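For context (not part of this patch), the equivalence relies on how the
target-width vmstate macros are defined; a simplified sketch, roughly as
in include/migration/cpu.h:

  #if TARGET_LONG_BITS == 64
  #define VMSTATE_UINTTL(_f, _s)            VMSTATE_UINT64(_f, _s)
  #define VMSTATE_UINTTL_ARRAY(_f, _s, _n)  VMSTATE_UINT64_ARRAY(_f, _s, _n)
  #endif

With TARGET_LONG_BITS fixed at 64 for Alpha, both spellings emit the same
uint64_t encoding, so the migration stream is unchanged.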
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20250925005137.59378-1-philmd@linaro.org>
---
target/alpha/machine.c | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/target/alpha/machine.c b/target/alpha/machine.c
index 5f302b166da..6828b123ca1 100644
--- a/target/alpha/machine.c
+++ b/target/alpha/machine.c
@@ -25,8 +25,8 @@ static const VMStateInfo vmstate_fpcr = {
};
static const VMStateField vmstate_env_fields[] = {
- VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31),
- VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31),
+ VMSTATE_UINT64_ARRAY(ir, CPUAlphaState, 31),
+ VMSTATE_UINT64_ARRAY(fir, CPUAlphaState, 31),
/* Save the architecture value of the fpcr, not the internally
expanded version. Since this architecture value does not
exist in memory to be stored, this requires a but of hoop
@@ -41,27 +41,27 @@ static const VMStateField vmstate_env_fields[] = {
.flags = VMS_SINGLE,
.offset = 0
},
- VMSTATE_UINTTL(pc, CPUAlphaState),
- VMSTATE_UINTTL(unique, CPUAlphaState),
- VMSTATE_UINTTL(lock_addr, CPUAlphaState),
- VMSTATE_UINTTL(lock_value, CPUAlphaState),
+ VMSTATE_UINT64(pc, CPUAlphaState),
+ VMSTATE_UINT64(unique, CPUAlphaState),
+ VMSTATE_UINT64(lock_addr, CPUAlphaState),
+ VMSTATE_UINT64(lock_value, CPUAlphaState),
VMSTATE_UINT32(flags, CPUAlphaState),
VMSTATE_UINT32(pcc_ofs, CPUAlphaState),
- VMSTATE_UINTTL(trap_arg0, CPUAlphaState),
- VMSTATE_UINTTL(trap_arg1, CPUAlphaState),
- VMSTATE_UINTTL(trap_arg2, CPUAlphaState),
+ VMSTATE_UINT64(trap_arg0, CPUAlphaState),
+ VMSTATE_UINT64(trap_arg1, CPUAlphaState),
+ VMSTATE_UINT64(trap_arg2, CPUAlphaState),
- VMSTATE_UINTTL(exc_addr, CPUAlphaState),
- VMSTATE_UINTTL(palbr, CPUAlphaState),
- VMSTATE_UINTTL(ptbr, CPUAlphaState),
- VMSTATE_UINTTL(vptptr, CPUAlphaState),
- VMSTATE_UINTTL(sysval, CPUAlphaState),
- VMSTATE_UINTTL(usp, CPUAlphaState),
+ VMSTATE_UINT64(exc_addr, CPUAlphaState),
+ VMSTATE_UINT64(palbr, CPUAlphaState),
+ VMSTATE_UINT64(ptbr, CPUAlphaState),
+ VMSTATE_UINT64(vptptr, CPUAlphaState),
+ VMSTATE_UINT64(sysval, CPUAlphaState),
+ VMSTATE_UINT64(usp, CPUAlphaState),
- VMSTATE_UINTTL_ARRAY(shadow, CPUAlphaState, 8),
- VMSTATE_UINTTL_ARRAY(scratch, CPUAlphaState, 24),
+ VMSTATE_UINT64_ARRAY(shadow, CPUAlphaState, 8),
+ VMSTATE_UINT64_ARRAY(scratch, CPUAlphaState, 24),
VMSTATE_END_OF_LIST()
};
--
2.51.0
* [PULL 04/75] target/hppa: Use hwaddr type for HPPATLBEntry::pa
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
HPPATLBEntry::@pa is a physical address, so use the appropriate type.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-2-philmd@linaro.org>
---
target/hppa/cpu.h | 2 +-
target/hppa/trace-events | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 672ab3750c8..869a75876e2 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -187,7 +187,7 @@ typedef struct HPPATLBEntry {
struct HPPATLBEntry *unused_next;
};
- target_ulong pa;
+ hwaddr pa;
unsigned entry_valid : 1;
diff --git a/target/hppa/trace-events b/target/hppa/trace-events
index a10ba73d5d4..01761a4559b 100644
--- a/target/hppa/trace-events
+++ b/target/hppa/trace-events
@@ -1,13 +1,13 @@
# See docs/devel/tracing.rst for syntax documentation.
# mem_helper.c
-disable hppa_tlb_flush_ent(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
-disable hppa_tlb_find_entry(void *env, void *ent, int valid, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p valid=%d va_b=0x%lx va_e=0x%lx pa=0x%lx"
+disable hppa_tlb_flush_ent(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64
+disable hppa_tlb_find_entry(void *env, void *ent, int valid, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p valid=%d va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64
disable hppa_tlb_find_entry_not_found(void *env, uint64_t addr) "env=%p addr=%08lx"
disable hppa_tlb_get_physical_address(void *env, int ret, int prot, uint64_t addr, uint64_t phys) "env=%p ret=%d prot=%d addr=0x%lx phys=0x%lx"
disable hppa_tlb_fill_excp(void *env, uint64_t addr, int size, int type, int mmu_idx) "env=%p addr=0x%lx size=%d type=%d mmu_idx=%d"
disable hppa_tlb_fill_success(void *env, uint64_t addr, uint64_t phys, int size, int type, int mmu_idx) "env=%p addr=0x%lx phys=0x%lx size=%d type=%d mmu_idx=%d"
-disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
+disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64
disable hppa_tlb_itlbp(void *env, void *ent, int access_id, int u, int pl2, int pl1, int type, int b, int d, int t) "env=%p ent=%p access_id=%x u=%d pl2=%d pl1=%d type=%d b=%d d=%d t=%d"
disable hppa_tlb_ptlb(void *env) "env=%p"
disable hppa_tlb_ptlb_local(void *env) "env=%p"
--
2.51.0
* [PULL 05/75] target/hppa: Have hppa_form_gva*() return vaddr type
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
Return a 'vaddr' type for "guest virtual address".
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-3-philmd@linaro.org>
---
target/hppa/cpu.h | 8 ++++----
target/hppa/helper.c | 4 ++--
target/hppa/mem_helper.c | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 869a75876e2..c652ef945ac 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -320,8 +320,8 @@ void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
-static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask,
- uint64_t spc, target_ulong off)
+static inline vaddr hppa_form_gva_mask(uint64_t gva_offset_mask,
+ uint64_t spc, target_ulong off)
{
#ifdef CONFIG_USER_ONLY
return off & gva_offset_mask;
@@ -330,8 +330,8 @@ static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask,
#endif
}
-static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
- target_ulong off)
+static inline vaddr hppa_form_gva(CPUHPPAState *env, uint64_t spc,
+ target_ulong off)
{
return hppa_form_gva_mask(env->gva_offset_mask, spc, off);
}
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
index d7f8495d982..edcd2bf27c8 100644
--- a/target/hppa/helper.c
+++ b/target/hppa/helper.c
@@ -148,8 +148,8 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
m = UINT32_MAX;
}
- qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n"
- "IA_B %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n",
+ qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (0x%" VADDR_PRIx ")\n"
+ "IA_B %08" PRIx64 ":%0*" PRIx64 " (0x%" VADDR_PRIx ")\n",
env->iasq_f >> 32, w, m & env->iaoq_f,
hppa_form_gva_mask(env->gva_offset_mask, env->iasq_f,
env->iaoq_f),
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 9bdd0a6f23d..cce82e65999 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -803,7 +803,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
- uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
+ vaddr gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
HPPATLBEntry *ent = hppa_find_tlb(env, gva);
if (ent == NULL) {
--
2.51.0
* [PULL 06/75] target/hppa: Explode MO_TExx -> MO_TE | MO_xx
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
Extract the implicit MO_TE definition in order to replace
it in the following commits.
Mechanical change using:
$ for n in UW UL UQ UO SW SL SQ; do \
sed -i -e "s/MO_TE$n/MO_TE | MO_$n/" \
$(git grep -l MO_TE$n target/hppa); \
done
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-4-philmd@linaro.org>
---
target/hppa/translate.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 7a81cfcb887..859c6cf5f9b 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -1691,7 +1691,7 @@ static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = tcg_temp_new_i32();
- do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
+ do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UL);
save_frw_i32(rt, tmp);
if (rt == 0) {
@@ -1716,7 +1716,7 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UQ);
save_frd(rt, tmp);
if (rt == 0) {
@@ -1750,7 +1750,7 @@ static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = load_frw_i32(rt);
- do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
+ do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UL);
return nullify_end(ctx);
}
@@ -1770,7 +1770,7 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = load_frd(rt);
- do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UQ);
return nullify_end(ctx);
}
--
2.51.0
* [PULL 07/75] target/hppa: Conceal MO_TE within do_load()
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-5-philmd@linaro.org>
---
target/hppa/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 859c6cf5f9b..ee0c874342c 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -1676,7 +1676,7 @@ static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
/* Make sure if RT == RB, we see the result of the load. */
dest = tcg_temp_new_i64();
}
- do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
+ do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, MO_TE | mop);
save_gpr(ctx, rt, dest);
return nullify_end(ctx);
@@ -3302,7 +3302,7 @@ static bool trans_ld(DisasContext *ctx, arg_ldst *a)
return gen_illegal(ctx);
}
return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
- a->disp, a->sp, a->m, a->size | MO_TE);
+ a->disp, a->sp, a->m, a->size);
}
static bool trans_st(DisasContext *ctx, arg_ldst *a)
--
2.51.0
* [PULL 08/75] target/hppa: Conceal MO_TE within do_load_32/64()
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-6-philmd@linaro.org>
---
target/hppa/translate.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index ee0c874342c..4680d826345 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -1599,6 +1599,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= MO_TE;
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1617,6 +1618,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= MO_TE;
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1676,7 +1678,7 @@ static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
/* Make sure if RT == RB, we see the result of the load. */
dest = tcg_temp_new_i64();
}
- do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, MO_TE | mop);
+ do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
save_gpr(ctx, rt, dest);
return nullify_end(ctx);
@@ -1691,7 +1693,7 @@ static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = tcg_temp_new_i32();
- do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UL);
+ do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL);
save_frw_i32(rt, tmp);
if (rt == 0) {
@@ -1716,7 +1718,7 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UQ);
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ);
save_frd(rt, tmp);
if (rt == 0) {
--
2.51.0
* [PULL 09/75] target/hppa: Conceal MO_TE within do_store()
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-7-philmd@linaro.org>
---
target/hppa/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 4680d826345..3f78bb2a76a 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -1739,7 +1739,7 @@ static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
int modify, MemOp mop)
{
nullify_over(ctx);
- do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
+ do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, MO_TE | mop);
return nullify_end(ctx);
}
@@ -3313,7 +3313,7 @@ static bool trans_st(DisasContext *ctx, arg_ldst *a)
if (!ctx->is_pa20 && a->size > MO_32) {
return gen_illegal(ctx);
}
- return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
+ return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size);
}
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
--
2.51.0
* [PULL 10/75] target/hppa: Conceal MO_TE within do_store_32/64()
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-8-philmd@linaro.org>
---
target/hppa/translate.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 3f78bb2a76a..92071affe47 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -1637,6 +1637,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= MO_TE;
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1655,6 +1656,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
+ mop |= MO_TE;
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1739,7 +1741,7 @@ static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
int modify, MemOp mop)
{
nullify_over(ctx);
- do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, MO_TE | mop);
+ do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
return nullify_end(ctx);
}
@@ -1752,7 +1754,7 @@ static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = load_frw_i32(rt);
- do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UL);
+ do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL);
return nullify_end(ctx);
}
@@ -1772,7 +1774,7 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = load_frd(rt);
- do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TE | MO_UQ);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ);
return nullify_end(ctx);
}
--
2.51.0
* [PULL 11/75] target/hppa: Introduce mo_endian() helper
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
mo_endian() returns the target endianness.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-9-philmd@linaro.org>
---
target/hppa/translate.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 92071affe47..6fec63cb433 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -104,6 +104,11 @@ typedef struct DisasContext {
#define MMU_DISABLED(C) MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif
+static inline MemOp mo_endian(DisasContext *ctx)
+{
+ return MO_TE;
+}
+
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
@@ -1599,7 +1604,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1618,7 +1623,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1637,7 +1642,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -1656,7 +1661,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
MMU_DISABLED(ctx));
tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
@@ -3320,7 +3325,7 @@ static bool trans_st(DisasContext *ctx, arg_ldst *a)
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
- MemOp mop = MO_TE | MO_ALIGN | a->size;
+ MemOp mop = mo_endian(ctx) | MO_ALIGN | a->size;
TCGv_i64 dest, ofs;
TCGv_i64 addr;
--
2.51.0
* [PULL 12/75] target/hppa: Replace MO_TE -> MO_BE
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
We only build the PA-RISC targets in big-endian byte order:
$ git grep TARGET_BIG_ENDIAN configs/targets/hppa-*
configs/targets/hppa-linux-user.mak:5:TARGET_BIG_ENDIAN=y
configs/targets/hppa-softmmu.mak:2:TARGET_BIG_ENDIAN=y
Therefore the MO_TE definition always expands to MO_BE. Use the
latter to simplify.
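For context (not part of this patch), MO_TE is derived from the target
endianness, roughly as in include/exec/memop.h:

  #if TARGET_BIG_ENDIAN
  #define MO_TE  MO_BE
  #else
  #define MO_TE  MO_LE
  #endif

With TARGET_BIG_ENDIAN=y for hppa, the two spellings are identical.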
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009101040.18378-10-philmd@linaro.org>
---
target/hppa/translate.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 6fec63cb433..853cba2ba4f 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -106,7 +106,8 @@ typedef struct DisasContext {
static inline MemOp mo_endian(DisasContext *ctx)
{
- return MO_TE;
+ /* The PSW_E bit sets the (little) endianness, but we don't implement it. */
+ return MO_BE;
}
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
--
2.51.0
* [PULL 13/75] target/hppa: correct size bit parity for fmpyadd
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
From: Gabriel Brookman <brookmangabriel@gmail.com>
For the fmpyadd instruction on the hppa architecture, there is a bit
that specifies whether the instruction operates on a 32-bit or
64-bit floating-point register. For most instructions, such a bit is 0
when operating on the smaller register and 1 when operating on the
larger register. However, according to page 6-57 of the PA-RISC 1.1
Architecture and Instruction Set Reference Manual, this convention is
reversed for the fmpyadd instruction specifically, meaning the bit is
1 for operations on 32-bit registers and 0 for 64-bit registers. See
also page 6-18 (fig. 6-8) and 6-19 (table 6-16), where the f field
for FMPYADD and FMPYSUB is documented. Previously, QEMU decoded this
bit with the usual convention and thus operated on the wrong register
size, leading to bugs when translating the fmpyadd instruction. This
patch fixes that issue.
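To make the reversed convention concrete (illustration only, not part of
the patch; the helper name is hypothetical), the f field sits in bit 5 of
the instruction word per the @mpyadd format below, and is interpreted as:

  static bool fmpyadd_is_single(uint32_t insn)
  {
      /* f=1 selects the 32-bit (_f) form, f=0 the 64-bit (_d) form */
      return extract32(insn, 5, 1);
  }

so the decode patterns must route f=1 to fmpyadd_f/fmpysub_f and f=0 to
fmpyadd_d/fmpysub_d, which is what the swapped lines below do.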
Reported-by: Andreas Hüttel <andreas.huettel@ur.de>
Signed-off-by: Gabriel Brookman <brookmangabriel@gmail.com>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/3096
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Helge Deller <deller@gmx.de>
Message-ID: <20251009-hppa-correct-fmpyadd-size-bit-decoding-v1-1-f63bb6c3290c@gmail.com>
[PMD: Add documentation refs mentioned by Andreas K. Huettel]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
target/hppa/insns.decode | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode
index 4eaac750ea8..13c6a55bf2a 100644
--- a/target/hppa/insns.decode
+++ b/target/hppa/insns.decode
@@ -365,10 +365,10 @@ fstd 011100 ..... ..... .. ............1. @ldstim11
&mpyadd rm1 rm2 ta ra tm
@mpyadd ...... rm1:5 rm2:5 ta:5 ra:5 . tm:5 &mpyadd
-fmpyadd_f 000110 ..... ..... ..... ..... 0 ..... @mpyadd
-fmpyadd_d 000110 ..... ..... ..... ..... 1 ..... @mpyadd
-fmpysub_f 100110 ..... ..... ..... ..... 0 ..... @mpyadd
-fmpysub_d 100110 ..... ..... ..... ..... 1 ..... @mpyadd
+fmpyadd_f 000110 ..... ..... ..... ..... 1 ..... @mpyadd
+fmpyadd_d 000110 ..... ..... ..... ..... 0 ..... @mpyadd
+fmpysub_f 100110 ..... ..... ..... ..... 1 ..... @mpyadd
+fmpysub_d 100110 ..... ..... ..... ..... 0 ..... @mpyadd
####
# Conditional Branches
--
2.51.0
* [PULL 14/75] target/loongarch: Replace VMSTATE_UINTTL() -> VMSTATE_UINT64()
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
The LoongArchCPU::pc field and the LoongArchCPU::gpr[] array are
both of uint64_t type. Use the appropriate VMSTATE_UINT64() macro.
There is no functional change (the migration stream is not
modified), because the LoongArch targets are only built as 64-bit:
$ git grep TARGET_LONG_BITS configs/targets/loongarch64*
configs/targets/loongarch64-linux-user.mak:7:TARGET_LONG_BITS=64
configs/targets/loongarch64-softmmu.mak:7:TARGET_LONG_BITS=64
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20250925004327.58764-1-philmd@linaro.org>
---
target/loongarch/machine.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c
index 73190fb3672..0366a507637 100644
--- a/target/loongarch/machine.c
+++ b/target/loongarch/machine.c
@@ -191,8 +191,8 @@ const VMStateDescription vmstate_loongarch_cpu = {
.version_id = 4,
.minimum_version_id = 4,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32),
- VMSTATE_UINTTL(env.pc, LoongArchCPU),
+ VMSTATE_UINT64_ARRAY(env.gpr, LoongArchCPU, 32),
+ VMSTATE_UINT64(env.pc, LoongArchCPU),
/* Remaining CSRs */
VMSTATE_UINT64(env.CSR_CRMD, LoongArchCPU),
--
2.51.0
* [PULL 15/75] target/loongarch: Remove target_ulong use in gen_goto_tb()
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
translator_use_goto_tb() expects a vaddr type since commit
b1c09220b4c ("accel/tcg: Replace target_ulong with vaddr in
translator*()").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20251009201947.34643-3-philmd@linaro.org>
---
target/loongarch/tcg/translate.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index 57c3b8c4bb2..055f6fb6046 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -99,8 +99,7 @@ void generate_exception(DisasContext *ctx, int excp)
ctx->base.is_jmp = DISAS_NORETURN;
}
-static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
- target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, vaddr dest)
{
if (ctx->va32) {
dest = (uint32_t) dest;
--
2.51.0
* [PULL 16/75] target/loongarch: Remove target_ulong use in gdb_write_register handler
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
ldq_le_p() returns a uint64_t, big enough to also hold
ldl_le_p()'s return value. If we were building for a 32-bit
LoongArch target, that value would not fit in target_ulong.
Better to stick to plain uint64_t.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20251009201947.34643-4-philmd@linaro.org>
---
target/loongarch/gdbstub.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c
index 471eda28c73..23a5eecc20b 100644
--- a/target/loongarch/gdbstub.c
+++ b/target/loongarch/gdbstub.c
@@ -62,7 +62,7 @@ int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
CPULoongArchState *env = cpu_env(cs);
- target_ulong tmp;
+ uint64_t tmp;
int length = 0;
if (n < 0 || n > 34) {
--
2.51.0
* [PULL 17/75] target/loongarch: Do not use target_ulong type for LDDIR level
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
The LDDIR page-table level is a 5-bit immediate. Using the
uint32_t type for it is sufficient. Avoid the target_ulong type.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20251009201947.34643-5-philmd@linaro.org>
---
target/loongarch/cpu-mmu.h | 2 +-
target/loongarch/tcg/helper.h | 2 +-
target/loongarch/cpu_helper.c | 2 +-
target/loongarch/tcg/tlb_helper.c | 4 ++--
target/loongarch/tcg/insn_trans/trans_privileged.c.inc | 2 +-
5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 0068d22efcb..dbc69c7c0f2 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -34,7 +34,7 @@ TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
MMUAccessType access_type, int mmu_idx,
int is_debug);
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
- uint64_t *dir_width, target_ulong level);
+ uint64_t *dir_width, unsigned int level);
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
#endif /* LOONGARCH_CPU_MMU_H */
diff --git a/target/loongarch/tcg/helper.h b/target/loongarch/tcg/helper.h
index db57dbfc167..7e508c5a7b9 100644
--- a/target/loongarch/tcg/helper.h
+++ b/target/loongarch/tcg/helper.h
@@ -129,7 +129,7 @@ DEF_HELPER_2(invtlb_all_asid, void, env, tl)
DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl)
DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl)
-DEF_HELPER_4(lddir, tl, env, tl, tl, i32)
+DEF_HELPER_4(lddir, tl, env, tl, i32, i32)
DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
DEF_HELPER_1(ertn, void, env)
DEF_HELPER_1(idle, void, env)
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index 4a9db3ea4c1..867e7c88670 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -17,7 +17,7 @@
#include "tcg/tcg_loongarch.h"
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
- uint64_t *dir_width, target_ulong level)
+ uint64_t *dir_width, unsigned int level)
{
switch (level) {
case 1:
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index f1d183cb641..e119f78d925 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -599,7 +599,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
- target_ulong level, uint32_t mem_idx)
+ uint32_t level, uint32_t mem_idx)
{
CPUState *cs = env_cpu(env);
target_ulong badvaddr, index, phys;
@@ -607,7 +607,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
if (unlikely((level == 0) || (level > 4))) {
qemu_log_mask(LOG_GUEST_ERROR,
- "Attepted LDDIR with level %"PRId64"\n", level);
+ "Attepted LDDIR with level %u\n", level);
return base;
}
diff --git a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
index a407ab51b74..64e53a44606 100644
--- a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc
@@ -380,7 +380,7 @@ static bool trans_lddir(DisasContext *ctx, arg_lddir *a)
if (check_plv(ctx)) {
return false;
}
- gen_helper_lddir(dest, tcg_env, src, tcg_constant_tl(a->imm), mem_idx);
+ gen_helper_lddir(dest, tcg_env, src, tcg_constant_i32(a->imm), mem_idx);
return true;
}
--
2.51.0
* [PULL 18/75] target/m68k: Remove unused @cpu_exception_index TCGv
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
When the @exception_index field was moved from each target's ArchCPU
to the global CPUState in commit 27103424c40 ("cpu: Move
exception_index field from CPU_COMMON to CPUState"), the
@cpu_exception_index TCGv was created for the m68k target.
For years, no code has ever used this register. Simply remove it.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20250924171308.53036-1-philmd@linaro.org>
---
target/m68k/translate.c | 6 ------
1 file changed, 6 deletions(-)
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 97afceb1297..0cee54f4900 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -45,7 +45,6 @@
#undef DEFO64
static TCGv_i32 cpu_halted;
-static TCGv_i32 cpu_exception_index;
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
@@ -81,11 +80,6 @@ void m68k_tcg_init(void)
cpu_halted = tcg_global_mem_new_i32(tcg_env,
-offsetof(M68kCPU, env) +
offsetof(CPUState, halted), "HALTED");
- cpu_exception_index = tcg_global_mem_new_i32(tcg_env,
- -offsetof(M68kCPU, env) +
- offsetof(CPUState, exception_index),
- "EXCEPTION");
-
p = cpu_reg_names;
for (i = 0; i < 8; i++) {
sprintf(p, "D%d", i);
--
2.51.0
* [PULL 19/75] target/m68k: Remove pointless @cpu_halted TCGv
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC
To: qemu-devel
Avoid registering a TCGv just to write the generic CPUState::halted
field. Access it directly via an offset from tcg_env in both the
STOP and HALT opcodes.
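A note on the offsets used in the new code (context only, not part of
the patch): tcg_env points at the env member embedded in M68kCPU, while
@halted lives in the CPUState that precedes it, so the env-relative
offset is negative:

  /* Sketch, assuming the usual ArchCPU layout (CPUState first, env after) */
  ptrdiff_t halted_ofs = (ptrdiff_t)offsetof(CPUState, halted)
                       - (ptrdiff_t)offsetof(M68kCPU, env);   /* < 0 */

which is exactly the expression passed to tcg_gen_st_i32() in the STOP
and HALT handlers.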
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20250925012454.60602-1-philmd@linaro.org>
---
target/m68k/translate.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 0cee54f4900..eb1ba150745 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -44,8 +44,6 @@
#undef DEFO32
#undef DEFO64
-static TCGv_i32 cpu_halted;
-
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
@@ -77,9 +75,6 @@ void m68k_tcg_init(void)
#undef DEFO32
#undef DEFO64
- cpu_halted = tcg_global_mem_new_i32(tcg_env,
- -offsetof(M68kCPU, env) +
- offsetof(CPUState, halted), "HALTED");
p = cpu_reg_names;
for (i = 0; i < 8; i++) {
sprintf(p, "D%d", i);
@@ -4506,7 +4501,8 @@ DISAS_INSN(halt)
gen_exception(s, s->pc, EXCP_SEMIHOSTING);
return;
}
- tcg_gen_movi_i32(cpu_halted, 1);
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
+ offsetof(CPUState, halted) - offsetof(M68kCPU, env));
gen_exception(s, s->pc, EXCP_HLT);
}
@@ -4522,7 +4518,8 @@ DISAS_INSN(stop)
ext = read_im16(env, s);
gen_set_sr_im(s, ext, 0);
- tcg_gen_movi_i32(cpu_halted, 1);
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
+ offsetof(CPUState, halted) - offsetof(M68kCPU, env));
gen_exception(s, s->pc, EXCP_HLT);
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 20/75] target/microblaze: Remove target_ulong use in cpu_handle_mmu_fault()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (18 preceding siblings ...)
2025-10-16 12:14 ` [PULL 19/75] target/m68k: Remove pointless @cpu_halted TCGv Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 21/75] target/microblaze: Remove target_ulong uses in get_phys_page_attrs_debug Philippe Mathieu-Daudé
` (54 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
cpu_handle_mmu_fault() -- renamed in commit f429d607c71 -- expects
a vaddr type for its address argument since commit 7510454e3e7
("cpu: Turn cpu_handle_mmu_fault() into a CPUClass hook").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251015180115.97493-2-philmd@linaro.org>
---
target/microblaze/mmu.h | 2 +-
target/microblaze/mmu.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
index 1068bd2d52b..2aca39c923b 100644
--- a/target/microblaze/mmu.h
+++ b/target/microblaze/mmu.h
@@ -86,7 +86,7 @@ typedef struct {
} MicroBlazeMMULookup;
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
- target_ulong vaddr, MMUAccessType rw, int mmu_idx);
+ vaddr vaddr, MMUAccessType rw, int mmu_idx);
uint32_t mmu_read(CPUMBState *env, bool ea, uint32_t rn);
void mmu_write(CPUMBState *env, bool ea, uint32_t rn, uint32_t v);
void mmu_init(MicroBlazeMMU *mmu);
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 8703ff5c657..db24cb399ce 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -78,7 +78,7 @@ static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
/* rw - 0 = read, 1 = write, 2 = fetch. */
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
- target_ulong vaddr, MMUAccessType rw, int mmu_idx)
+ vaddr vaddr, MMUAccessType rw, int mmu_idx)
{
MicroBlazeMMU *mmu = &cpu->env.mmu;
unsigned int i, hit = 0;
@@ -172,7 +172,7 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
}
done:
qemu_log_mask(CPU_LOG_MMU,
- "MMU vaddr=0x" TARGET_FMT_lx
+ "MMU vaddr=0x%" VADDR_PRIx
" rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
vaddr, rw, tlb_wr, tlb_ex, hit);
return hit;
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 21/75] target/microblaze: Remove target_ulong uses in get_phys_page_attrs_debug
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (19 preceding siblings ...)
2025-10-16 12:14 ` [PULL 20/75] target/microblaze: Remove target_ulong use in cpu_handle_mmu_fault() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 22/75] target/microblaze: Remove target_ulong use in gen_goto_tb() Philippe Mathieu-Daudé
` (53 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The CPUClass::get_phys_page_debug() handler has taken a 'vaddr'
address and returned a 'hwaddr' type since commit 00b941e581b ("cpu:
Turn cpu_get_phys_page_debug() into a CPUClass hook").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251015180115.97493-3-philmd@linaro.org>
---
target/microblaze/helper.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index ef0e2f973fa..cf577a72268 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -274,7 +274,8 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
MemTxAttrs *attrs)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- target_ulong vaddr, paddr = 0;
+ vaddr vaddr;
+ hwaddr paddr = 0;
MicroBlazeMMULookup lu;
int mmu_idx = cpu_mmu_index(cs, false);
unsigned int hit;
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 22/75] target/microblaze: Remove target_ulong use in gen_goto_tb()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (20 preceding siblings ...)
2025-10-16 12:14 ` [PULL 21/75] target/microblaze: Remove target_ulong uses in get_phys_page_attrs_debug Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 23/75] target/microblaze: Remove target_ulong use in helper_stackprot() Philippe Mathieu-Daudé
` (52 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
translator_use_goto_tb() expects a vaddr type since commit
b1c09220b4c ("accel/tcg: Replace target_ulong with vaddr in
translator_*()").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251015180115.97493-4-philmd@linaro.org>
---
target/microblaze/translate.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 3dd74b021e8..b93a40fedbc 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -116,8 +116,7 @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
-static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx,
- target_ulong dest)
+static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
{
if (translator_use_goto_tb(&dc->base, dest)) {
tcg_gen_goto_tb(tb_slot_idx);
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 23/75] target/microblaze: Remove target_ulong use in helper_stackprot()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (21 preceding siblings ...)
2025-10-16 12:14 ` [PULL 22/75] target/microblaze: Remove target_ulong use in gen_goto_tb() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 24/75] target/microblaze: Have compute_ldst_addr_type[ab] return TCGv_i32 Philippe Mathieu-Daudé
` (51 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Since commit 36a9529e60e ("target/microblaze: Simplify
compute_ldst_addr_type{a,b}"), helper_stackprot() takes
a TCGv_i32 argument.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251015180115.97493-5-philmd@linaro.org>
---
target/microblaze/helper.h | 2 +-
target/microblaze/op_helper.c | 4 ++--
target/microblaze/translate.c | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index ef4fad9b91e..01eba592b26 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -20,7 +20,7 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32)
DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32)
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index b8365b3b1d2..df93c4229d6 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -365,13 +365,13 @@ uint32_t helper_pcmpbf(uint32_t a, uint32_t b)
return 0;
}
-void helper_stackprot(CPUMBState *env, target_ulong addr)
+void helper_stackprot(CPUMBState *env, uint32_t addr)
{
if (addr < env->slr || addr > env->shr) {
CPUState *cs = env_cpu(env);
qemu_log_mask(CPU_LOG_INT, "Stack protector violation at "
- TARGET_FMT_lx " %x %x\n",
+ "0x%x 0x%x 0x%x\n",
addr, env->slr, env->shr);
env->ear = addr;
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index b93a40fedbc..994e1d5cef0 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -606,7 +606,7 @@ DO_TYPEBI(xori, false, tcg_gen_xori_i32)
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
- TCGv ret;
+ TCGv_i32 ret;
/* If any of the regs is r0, set t to the value of the other reg. */
if (ra && rb) {
@@ -628,7 +628,7 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
- TCGv ret;
+ TCGv_i32 ret;
/* If any of the regs is r0, set t to the value of the other reg. */
if (ra && imm) {
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 24/75] target/microblaze: Have compute_ldst_addr_type[ab] return TCGv_i32
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (22 preceding siblings ...)
2025-10-16 12:14 ` [PULL 23/75] target/microblaze: Remove target_ulong use in helper_stackprot() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 25/75] target/microblaze: Have do_load/store() take a TCGv_i32 address argument Philippe Mathieu-Daudé
` (50 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The bodies of both compute_ldst_addr_typea() and
compute_ldst_addr_typeb() use a TCGv_i32, so have them return that
same type.
Suggested-by: Anton Johansson <anjo@rev.ng>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251015180115.97493-6-philmd@linaro.org>
---
target/microblaze/translate.c | 48 +++++++++++++++++------------------
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 994e1d5cef0..1554b9e67b0 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -604,7 +604,7 @@ static bool trans_wdic(DisasContext *dc, arg_wdic *a)
DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
-static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
+static TCGv_i32 compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
TCGv_i32 ret;
@@ -626,7 +626,7 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
return ret;
}
-static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
+static TCGv_i32 compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
TCGv_i32 ret;
@@ -750,13 +750,13 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}
@@ -776,19 +776,19 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}
@@ -810,19 +810,19 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}
@@ -844,16 +844,16 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg)
static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
/* lwx does not throw unaligned access errors, so force alignment */
- tcg_gen_andi_tl(addr, addr, ~3);
+ tcg_gen_andi_i32(addr, addr, ~3);
tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
mo_endian(dc) | MO_UL);
@@ -910,13 +910,13 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}
@@ -936,19 +936,19 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg)
static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}
@@ -970,19 +970,19 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg)
static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}
@@ -1004,19 +1004,19 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg)
static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
- TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
- TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
TCGLabel *swx_done = gen_new_label();
TCGLabel *swx_fail = gen_new_label();
TCGv_i32 tval;
/* swx does not throw unaligned access errors, so force alignment */
- tcg_gen_andi_tl(addr, addr, ~3);
+ tcg_gen_andi_i32(addr, addr, ~3);
/*
* Compare the address vs the one we used during lwx.
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 25/75] target/microblaze: Have do_load/store() take a TCGv_i32 address argument
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (23 preceding siblings ...)
2025-10-16 12:14 ` [PULL 24/75] target/microblaze: Have compute_ldst_addr_type[ab] return TCGv_i32 Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 26/75] target/microblaze: Convert CPUMBState::res_addr field to uint32_t type Philippe Mathieu-Daudé
` (49 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
All callers of do_load() and do_store() pass a TCGv_i32 address,
so have both functions take a TCGv_i32 argument.
Suggested-by: Anton Johansson <anjo@rev.ng>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251015180115.97493-7-philmd@linaro.org>
---
target/microblaze/translate.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 1554b9e67b0..ed53848bad5 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -708,7 +708,7 @@ static inline MemOp mo_endian(DisasContext *dc)
return dc->cfg->endi ? MO_LE : MO_BE;
}
-static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+static bool do_load(DisasContext *dc, int rd, TCGv_i32 addr, MemOp mop,
int mem_index, bool rev)
{
MemOp size = mop & MO_SIZE;
@@ -726,7 +726,7 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
mop ^= MO_BSWAP;
}
if (size < MO_32) {
- tcg_gen_xori_tl(addr, addr, 3 - size);
+ tcg_gen_xori_i32(addr, addr, 3 - size);
}
}
@@ -868,7 +868,7 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg)
return true;
}
-static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+static bool do_store(DisasContext *dc, int rd, TCGv_i32 addr, MemOp mop,
int mem_index, bool rev)
{
MemOp size = mop & MO_SIZE;
@@ -886,7 +886,7 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
mop ^= MO_BSWAP;
}
if (size < MO_32) {
- tcg_gen_xori_tl(addr, addr, 3 - size);
+ tcg_gen_xori_i32(addr, addr, 3 - size);
}
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 26/75] target/microblaze: Convert CPUMBState::res_addr field to uint32_t type
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (24 preceding siblings ...)
2025-10-16 12:14 ` [PULL 25/75] target/microblaze: Have do_load/store() take a TCGv_i32 address argument Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 27/75] target/openrisc: Replace VMSTATE_UINTTL() -> VMSTATE_UINT32() Philippe Mathieu-Daudé
` (48 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The CPUMBState::@res_addr field has been used as a u32 since commit
cfeea807e5a ("target-microblaze: Tighten up TCGv_i32 vs
TCGv type usage"). Convert it to uint32_t, bumping the migration
version. Use the RES_ADDR_NONE definition where appropriate.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251015180115.97493-8-philmd@linaro.org>
---
target/microblaze/cpu.h | 2 +-
target/microblaze/machine.c | 6 +++---
target/microblaze/translate.c | 17 +++++++++--------
3 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 3ce28b302fe..14b107876a4 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -259,7 +259,7 @@ struct CPUArchState {
/* lwx/swx reserved address */
#define RES_ADDR_NONE 0xffffffff /* Use 0xffffffff to indicate no reservation */
- target_ulong res_addr;
+ uint32_t res_addr;
uint32_t res_val;
/* Internal flags. */
diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c
index a4cf38dc891..48efa546d39 100644
--- a/target/microblaze/machine.c
+++ b/target/microblaze/machine.c
@@ -78,7 +78,7 @@ static const VMStateField vmstate_env_fields[] = {
VMSTATE_UINT32(iflags, CPUMBState),
VMSTATE_UINT32(res_val, CPUMBState),
- VMSTATE_UINTTL(res_addr, CPUMBState),
+ VMSTATE_UINT32(res_addr, CPUMBState),
VMSTATE_STRUCT(mmu, CPUMBState, 0, vmstate_mmu, MicroBlazeMMU),
@@ -87,8 +87,8 @@ static const VMStateField vmstate_env_fields[] = {
static const VMStateDescription vmstate_env = {
.name = "env",
- .version_id = 0,
- .minimum_version_id = 0,
+ .version_id = 1,
+ .minimum_version_id = 1,
.fields = vmstate_env_fields,
};
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index ed53848bad5..6442a250c5d 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -55,7 +55,7 @@ static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
-static TCGv cpu_res_addr;
+static TCGv_i32 cpu_res_addr;
static TCGv_i32 cpu_res_val;
/* This is the state at translation time. */
@@ -857,7 +857,7 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg)
tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
mo_endian(dc) | MO_UL);
- tcg_gen_mov_tl(cpu_res_addr, addr);
+ tcg_gen_mov_i32(cpu_res_addr, addr);
if (arg->rd) {
tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
@@ -1024,7 +1024,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg)
* branch, but we know we can use the equal version in the global.
* In either case, addr is no longer needed.
*/
- tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
+ tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
/*
* Compare the value loaded during lwx with current contents of
@@ -1052,7 +1052,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg)
* Prevent the saved address from working again without another ldx.
* Akin to the pseudocode setting reservation = 0.
*/
- tcg_gen_movi_tl(cpu_res_addr, -1);
+ tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE);
return true;
}
@@ -1173,7 +1173,7 @@ static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
}
tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
- tcg_gen_movi_tl(cpu_res_addr, -1);
+ tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE);
dc->base.is_jmp = DISAS_EXIT;
return true;
@@ -1194,7 +1194,7 @@ static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
if (arg->rd) {
tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
}
- tcg_gen_movi_tl(cpu_res_addr, -1);
+ tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE);
#ifdef CONFIG_USER_ONLY
switch (imm) {
@@ -1885,6 +1885,7 @@ void mb_tcg_init(void)
tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
}
- cpu_res_addr =
- tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
+ cpu_res_addr = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUMBState, res_addr),
+ "res_addr");
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 27/75] target/openrisc: Replace VMSTATE_UINTTL() -> VMSTATE_UINT32()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (25 preceding siblings ...)
2025-10-16 12:14 ` [PULL 26/75] target/microblaze: Convert CPUMBState::res_addr field to uint32_t type Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 28/75] target/openrisc: Do not use target_ulong for @mr in MTSPR helper Philippe Mathieu-Daudé
` (47 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Both OpenRISCTLBEntry fields are of uint32_t type. Use the
appropriate VMSTATE_UINT32() macro.
There is no functional change (the migration stream is not
modified), because the OpenRISC targets are only built as 32-bit:
$ git grep TARGET_LONG_BITS configs/targets/or1k-*
configs/targets/or1k-linux-user.mak:5:TARGET_LONG_BITS=32
configs/targets/or1k-softmmu.mak:5:TARGET_LONG_BITS=32
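For context, VMSTATE_UINTTL() selects the field width from
TARGET_LONG_BITS, roughly along these lines (paraphrased from memory
of include/migration/cpu.h, so treat the exact spelling as an
assumption):

    #if TARGET_LONG_BITS == 64
    #define VMSTATE_UINTTL(_f, _s)  VMSTATE_UINT64(_f, _s)
    #else
    #define VMSTATE_UINTTL(_f, _s)  VMSTATE_UINT32(_f, _s)
    #endif

so on a 32-bit-only target both spellings put the same bytes on the
wire.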
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-2-philmd@linaro.org>
---
target/openrisc/machine.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
index 081c706d02c..fa054e528bd 100644
--- a/target/openrisc/machine.c
+++ b/target/openrisc/machine.c
@@ -26,8 +26,8 @@ static const VMStateDescription vmstate_tlb_entry = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL(mr, OpenRISCTLBEntry),
- VMSTATE_UINTTL(tr, OpenRISCTLBEntry),
+ VMSTATE_UINT32(mr, OpenRISCTLBEntry),
+ VMSTATE_UINT32(tr, OpenRISCTLBEntry),
VMSTATE_END_OF_LIST()
}
};
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 28/75] target/openrisc: Do not use target_ulong for @mr in MTSPR helper
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (26 preceding siblings ...)
2025-10-16 12:14 ` [PULL 27/75] target/openrisc: Replace VMSTATE_UINTTL() -> VMSTATE_UINT32() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 29/75] target/openrisc: Remove unused cpu_openrisc_map_address_*() handlers Philippe Mathieu-Daudé
` (46 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The OpenRISCTLBEntry::@mr field has been of uint32_t type since its
introduction in commit 726fe045720 ("target-or32: Add MMU support").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-3-philmd@linaro.org>
---
target/openrisc/sys_helper.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index b091a9c6685..ad59939db3b 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -45,7 +45,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
OpenRISCCPU *cpu = env_archcpu(env);
#ifndef CONFIG_USER_ONLY
CPUState *cs = env_cpu(env);
- target_ulong mr;
+ uint32_t mr;
int idx;
#endif
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 29/75] target/openrisc: Remove unused cpu_openrisc_map_address_*() handlers
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (27 preceding siblings ...)
2025-10-16 12:14 ` [PULL 28/75] target/openrisc: Do not use target_ulong for @mr in MTSPR helper Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 30/75] target/openrisc: Remove target_ulong use in raise_mmu_exception() Philippe Mathieu-Daudé
` (45 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Commit 23d45ebdb19 ("target/openrisc: Remove indirect
function calls for mmu") removed the last uses of both
cpu_openrisc_map_address_code() and
cpu_openrisc_map_address_data() helpers.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-4-philmd@linaro.org>
---
target/openrisc/cpu.h | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index f4bcf00b073..87201365a91 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -220,15 +220,6 @@ typedef struct OpenRISCTLBEntry {
typedef struct CPUOpenRISCTLBContext {
OpenRISCTLBEntry itlb[TLB_SIZE];
OpenRISCTLBEntry dtlb[TLB_SIZE];
-
- int (*cpu_openrisc_map_address_code)(OpenRISCCPU *cpu,
- hwaddr *physical,
- int *prot,
- target_ulong address, int rw);
- int (*cpu_openrisc_map_address_data)(OpenRISCCPU *cpu,
- hwaddr *physical,
- int *prot,
- target_ulong address, int rw);
} CPUOpenRISCTLBContext;
#endif
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 30/75] target/openrisc: Remove target_ulong use in raise_mmu_exception()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (28 preceding siblings ...)
2025-10-16 12:14 ` [PULL 29/75] target/openrisc: Remove unused cpu_openrisc_map_address_*() handlers Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 31/75] target/openrisc: Use vaddr type for $pc jumps Philippe Mathieu-Daudé
` (44 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The TCGCPUOps::tlb_fill() handler provides a vaddr type (since commit
7510454e3e7 "cpu: Turn cpu_handle_mmu_fault() into a CPUClass hook").
Do not inline get_phys_nommu(); rely on the linker to optimize it at
link time.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-5-philmd@linaro.org>
---
target/openrisc/mmu.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
index acea50c41eb..ffb732e0d1f 100644
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -28,15 +28,14 @@
#include "qemu/host-utils.h"
#include "hw/loader.h"
-static inline void get_phys_nommu(hwaddr *phys_addr, int *prot,
- target_ulong address)
+static void get_phys_nommu(hwaddr *phys_addr, int *prot, vaddr address)
{
*phys_addr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot,
- target_ulong addr, int need, bool super)
+ vaddr addr, int need, bool super)
{
int idx = (addr >> TARGET_PAGE_BITS) & TLB_MASK;
uint32_t imr = cpu->env.tlb.itlb[idx].mr;
@@ -95,7 +94,7 @@ static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot,
}
}
-static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address,
+static void raise_mmu_exception(OpenRISCCPU *cpu, vaddr address,
int exception)
{
CPUState *cs = CPU(cpu);
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 31/75] target/openrisc: Use vaddr type for $pc jumps
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (29 preceding siblings ...)
2025-10-16 12:14 ` [PULL 30/75] target/openrisc: Remove target_ulong use in raise_mmu_exception() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 32/75] target/openrisc: Remove 'TARGET_LONG_BITS != 32' dead code Philippe Mathieu-Daudé
` (43 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
translator_use_goto_tb() expects a vaddr type since commit
b1c09220b4c ("accel/tcg: Replace target_ulong with vaddr in
translator_*()").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-6-philmd@linaro.org>
---
target/openrisc/translate.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 5ab3bc7021d..9f61f917b3b 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -51,7 +51,7 @@ typedef struct DisasContext {
uint32_t avr;
/* If not -1, jmp_pc contains this value and so is a direct jump. */
- target_ulong jmp_pc_imm;
+ vaddr jmp_pc_imm;
/* The temporary corresponding to register 0 for this compilation. */
TCGv R0;
@@ -580,7 +580,7 @@ static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
static bool trans_l_j(DisasContext *dc, arg_l_j *a)
{
- target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
+ vaddr tmp_pc = dc->base.pc_next + a->n * 4;
tcg_gen_movi_tl(jmp_pc, tmp_pc);
dc->jmp_pc_imm = tmp_pc;
@@ -590,8 +590,8 @@ static bool trans_l_j(DisasContext *dc, arg_l_j *a)
static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
{
- target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
- target_ulong ret_pc = dc->base.pc_next + 8;
+ vaddr tmp_pc = dc->base.pc_next + a->n * 4;
+ vaddr ret_pc = dc->base.pc_next + 8;
tcg_gen_movi_tl(cpu_regs[9], ret_pc);
/* Optimize jal being used to load the PC for PIC. */
@@ -605,7 +605,7 @@ static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
- target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
+ vaddr tmp_pc = dc->base.pc_next + a->n * 4;
TCGv t_next = tcg_constant_tl(dc->base.pc_next + 8);
TCGv t_true = tcg_constant_tl(tmp_pc);
@@ -1586,7 +1586,7 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- target_ulong jmp_dest;
+ vaddr jmp_dest;
/* If we have already exited the TB, nothing following has effect. */
if (dc->base.is_jmp == DISAS_NORETURN) {
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 32/75] target/openrisc: Remove 'TARGET_LONG_BITS != 32' dead code
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (30 preceding siblings ...)
2025-10-16 12:14 ` [PULL 31/75] target/openrisc: Use vaddr type for $pc jumps Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 33/75] target/openrisc: Explode MO_TExx -> MO_TE | MO_xx Philippe Mathieu-Daudé
` (42 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The OpenRISC targets are only built as 32-bit:
$ git grep TARGET_LONG_BITS configs/targets/or1k-*
configs/targets/or1k-linux-user.mak:5:TARGET_LONG_BITS=32
configs/targets/or1k-softmmu.mak:5:TARGET_LONG_BITS=32
Remove the dead code guarded by TARGET_LONG_BITS != 32.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-7-philmd@linaro.org>
---
target/openrisc/translate.c | 33 ++++-----------------------------
1 file changed, 4 insertions(+), 29 deletions(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 9f61f917b3b..29e6b51a930 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -298,19 +298,8 @@ static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
tcg_gen_ext_tl_i64(t1, srca);
tcg_gen_ext_tl_i64(t2, srcb);
- if (TARGET_LONG_BITS == 32) {
- tcg_gen_mul_i64(cpu_mac, t1, t2);
- tcg_gen_movi_tl(cpu_sr_ov, 0);
- } else {
- TCGv_i64 high = tcg_temp_new_i64();
-
- tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
- tcg_gen_sari_i64(t1, cpu_mac, 63);
- tcg_gen_negsetcond_i64(TCG_COND_NE, t1, t1, high);
- tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
-
- gen_ove_ov(dc);
- }
+ tcg_gen_mul_i64(cpu_mac, t1, t2);
+ tcg_gen_movi_tl(cpu_sr_ov, 0);
}
static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
@@ -320,18 +309,8 @@ static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
tcg_gen_extu_tl_i64(t1, srca);
tcg_gen_extu_tl_i64(t2, srcb);
- if (TARGET_LONG_BITS == 32) {
- tcg_gen_mul_i64(cpu_mac, t1, t2);
- tcg_gen_movi_tl(cpu_sr_cy, 0);
- } else {
- TCGv_i64 high = tcg_temp_new_i64();
-
- tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
- tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
- tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
-
- gen_ove_cy(dc);
- }
+ tcg_gen_mul_i64(cpu_mac, t1, t2);
+ tcg_gen_movi_tl(cpu_sr_cy, 0);
}
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
@@ -349,11 +328,7 @@ static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
tcg_gen_xor_i64(t1, t1, cpu_mac);
tcg_gen_andc_i64(t1, t1, t2);
-#if TARGET_LONG_BITS == 32
tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
-#else
- tcg_gen_mov_i64(cpu_sr_ov, t1);
-#endif
gen_ove_ov(dc);
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 33/75] target/openrisc: Explode MO_TExx -> MO_TE | MO_xx
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (31 preceding siblings ...)
2025-10-16 12:14 ` [PULL 32/75] target/openrisc: Remove 'TARGET_LONG_BITS != 32' dead code Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 34/75] target/openrisc: Conceal MO_TE within do_load() Philippe Mathieu-Daudé
` (41 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Extract the implicit MO_TE definition in order to replace
it in the next commit.
Mechanical change using:
$ for n in UW UL UQ UO SW SL SQ; do \
sed -i -e "s/MO_TE$n/MO_TE | MO_$n/" \
$(git grep -l MO_TE$n target/openrisc); \
done
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-8-philmd@linaro.org>
---
target/openrisc/translate.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 29e6b51a930..52d51313f77 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -622,7 +622,7 @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a)
check_r0_write(dc, a->d);
ea = tcg_temp_new();
tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL);
+ tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TE | MO_UL);
tcg_gen_mov_tl(cpu_lock_addr, ea);
tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
return true;
@@ -640,13 +640,13 @@ static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TEUL);
+ do_load(dc, a, MO_TE | MO_UL);
return true;
}
static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TESL);
+ do_load(dc, a, MO_TE | MO_SL);
return true;
}
@@ -664,13 +664,13 @@ static bool trans_l_lbs(DisasContext *dc, arg_load *a)
static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TEUW);
+ do_load(dc, a, MO_TE | MO_UW);
return true;
}
static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TESW);
+ do_load(dc, a, MO_TE | MO_SW);
return true;
}
@@ -688,7 +688,7 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a)
val = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
- cpu_R(dc, a->b), dc->mem_idx, MO_TEUL);
+ cpu_R(dc, a->b), dc->mem_idx, MO_TE | MO_UL);
tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
tcg_gen_br(lab_done);
@@ -710,7 +710,7 @@ static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
- do_store(dc, a, MO_TEUL);
+ do_store(dc, a, MO_TE | MO_UL);
return true;
}
@@ -722,7 +722,7 @@ static bool trans_l_sb(DisasContext *dc, arg_store *a)
static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
- do_store(dc, a, MO_TEUW);
+ do_store(dc, a, MO_TE | MO_UW);
return true;
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 34/75] target/openrisc: Conceal MO_TE within do_load()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (32 preceding siblings ...)
2025-10-16 12:14 ` [PULL 33/75] target/openrisc: Explode MO_TExx -> MO_TE | MO_xx Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 35/75] target/openrisc: Conceal MO_TE within do_store() Philippe Mathieu-Daudé
` (40 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-9-philmd@linaro.org>
---
target/openrisc/translate.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 52d51313f77..a6ba1747eca 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -632,6 +632,8 @@ static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
{
TCGv ea;
+ mop |= MO_TE;
+
check_r0_write(dc, a->d);
ea = tcg_temp_new();
tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
@@ -640,13 +642,13 @@ static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TE | MO_UL);
+ do_load(dc, a, MO_UL);
return true;
}
static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TE | MO_SL);
+ do_load(dc, a, MO_SL);
return true;
}
@@ -664,13 +666,13 @@ static bool trans_l_lbs(DisasContext *dc, arg_load *a)
static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TE | MO_UW);
+ do_load(dc, a, MO_UW);
return true;
}
static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
- do_load(dc, a, MO_TE | MO_SW);
+ do_load(dc, a, MO_SW);
return true;
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 35/75] target/openrisc: Conceal MO_TE within do_store()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (33 preceding siblings ...)
2025-10-16 12:14 ` [PULL 34/75] target/openrisc: Conceal MO_TE within do_load() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 36/75] target/openrisc: Introduce mo_endian() helper Philippe Mathieu-Daudé
` (39 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-10-philmd@linaro.org>
---
target/openrisc/translate.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index a6ba1747eca..623513bb15c 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -706,13 +706,16 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a)
static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
{
TCGv t0 = tcg_temp_new();
+
+ mop |= MO_TE;
+
tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i);
tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
}
static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
- do_store(dc, a, MO_TE | MO_UL);
+ do_store(dc, a, MO_UL);
return true;
}
@@ -724,7 +727,7 @@ static bool trans_l_sb(DisasContext *dc, arg_store *a)
static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
- do_store(dc, a, MO_TE | MO_UW);
+ do_store(dc, a, MO_UW);
return true;
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 36/75] target/openrisc: Introduce mo_endian() helper
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (34 preceding siblings ...)
2025-10-16 12:14 ` [PULL 35/75] target/openrisc: Conceal MO_TE within do_store() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 37/75] target/openrisc: Replace MO_TE -> MO_BE Philippe Mathieu-Daudé
` (38 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
mo_endian() returns the target endianness.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-11-philmd@linaro.org>
---
target/openrisc/translate.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 623513bb15c..1a43e5ab6f7 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -59,6 +59,11 @@ typedef struct DisasContext {
TCGv zero;
} DisasContext;
+static inline MemOp mo_endian(DisasContext *dc)
+{
+ return MO_TE;
+}
+
static inline bool is_user(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
@@ -622,7 +627,8 @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a)
check_r0_write(dc, a->d);
ea = tcg_temp_new();
tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TE | MO_UL);
+ tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx,
+ mo_endian(dc) | MO_UL);
tcg_gen_mov_tl(cpu_lock_addr, ea);
tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
return true;
@@ -632,7 +638,7 @@ static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
{
TCGv ea;
- mop |= MO_TE;
+ mop |= mo_endian(dc);
check_r0_write(dc, a->d);
ea = tcg_temp_new();
@@ -690,7 +696,8 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a)
val = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
- cpu_R(dc, a->b), dc->mem_idx, MO_TE | MO_UL);
+ cpu_R(dc, a->b), dc->mem_idx,
+ mo_endian(dc) | MO_UL);
tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
tcg_gen_br(lab_done);
@@ -707,7 +714,7 @@ static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
{
TCGv t0 = tcg_temp_new();
- mop |= MO_TE;
+ mop |= mo_endian(dc);
tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i);
tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 37/75] target/openrisc: Replace MO_TE -> MO_BE
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (35 preceding siblings ...)
2025-10-16 12:14 ` [PULL 36/75] target/openrisc: Introduce mo_endian() helper Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 38/75] target/openrisc: Inline tcg_gen_trunc_i64_tl() Philippe Mathieu-Daudé
` (37 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
We only build the OpenRISC targets with big-endian byte order:
$ git grep TARGET_BIG_ENDIAN configs/targets/or1k-*
configs/targets/or1k-linux-user.mak:2:TARGET_BIG_ENDIAN=y
configs/targets/or1k-softmmu.mak:2:TARGET_BIG_ENDIAN=y
Therefore the MO_TE definition always expands to MO_BE. Use the
latter to simplify.
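For reference, MO_TE is the target-endian alias from exec/memop.h,
roughly (paraphrased, treat the exact definition as an assumption):

    #if TARGET_BIG_ENDIAN
        MO_TE = MO_BE,
    #else
        MO_TE = MO_LE,
    #endif

so with TARGET_BIG_ENDIAN=y it is simply another name for MO_BE.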
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-12-philmd@linaro.org>
---
target/openrisc/translate.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 1a43e5ab6f7..3d9dfd663aa 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -61,7 +61,8 @@ typedef struct DisasContext {
static inline MemOp mo_endian(DisasContext *dc)
{
- return MO_TE;
+ /* The SR_LEE bit sets the (little) endianness, but we don't implement it. */
+ return MO_BE;
}
static inline bool is_user(DisasContext *dc)
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 38/75] target/openrisc: Inline tcg_gen_trunc_i64_tl()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (36 preceding siblings ...)
2025-10-16 12:14 ` [PULL 37/75] target/openrisc: Replace MO_TE -> MO_BE Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 39/75] target/openrisc: Replace target_ulong -> uint32_t Philippe Mathieu-Daudé
` (36 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The OpenRISC targets are only built as 32-bit, so tcg_gen_trunc_i64_tl
expands to tcg_gen_extrl_i64_i32(). Use the latter to simplify the
mechanical change in the next commit.
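As a reminder, the tcg_gen_trunc_i64_tl() name is a per-target alias,
roughly (paraphrased from the tcg-op headers, exact spelling is an
assumption):

    #if TARGET_LONG_BITS == 32
    #define tcg_gen_trunc_i64_tl  tcg_gen_extrl_i64_i32
    #else
    #define tcg_gen_trunc_i64_tl  tcg_gen_mov_i64
    #endif

so on this 32-bit-only target both names emit the same op.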
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-13-philmd@linaro.org>
---
target/openrisc/translate.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 3d9dfd663aa..beb4a546fd7 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -351,7 +351,7 @@ static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
/* Note that overflow is only computed during addition stage. */
tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
- tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
+ tcg_gen_extrl_i64_i32(cpu_sr_cy, t1);
gen_ove_cy(dc);
}
@@ -392,7 +392,7 @@ static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
/* Note that overflow is only computed during subtraction stage. */
tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
- tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
+ tcg_gen_extrl_i64_i32(cpu_sr_cy, t2);
gen_ove_cy(dc);
}
@@ -917,7 +917,7 @@ static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
{
check_r0_write(dc, a->d);
- tcg_gen_trunc_i64_tl(cpu_R(dc, a->d), cpu_mac);
+ tcg_gen_extrl_i64_i32(cpu_R(dc, a->d), cpu_mac);
tcg_gen_movi_i64(cpu_mac, 0);
return true;
}
--
2.51.0
^ permalink raw reply related [flat|nested] 76+ messages in thread* [PULL 39/75] target/openrisc: Replace target_ulong -> uint32_t
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (37 preceding siblings ...)
2025-10-16 12:14 ` [PULL 38/75] target/openrisc: Inline tcg_gen_trunc_i64_tl() Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 40/75] target/riscv: Use 32 bits for misa extensions Philippe Mathieu-Daudé
` (35 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The OpenRISC targets are only built as 32-bit:
$ git grep TARGET_LONG_BITS configs/targets/or1k-*
configs/targets/or1k-linux-user.mak:5:TARGET_LONG_BITS=32
configs/targets/or1k-softmmu.mak:5:TARGET_LONG_BITS=32
Therefore target_ulong always expands to uint32_t. Replace and
adapt the API uses mechanically:
target_ulong -> uint32_t
target_long -> int32_t
tl -> i32
TCGv -> TCGv_i32
tcg_temp_new -> tcg_temp_new_i32
tcg_global_mem_new -> tcg_global_mem_new_i32
VMSTATE_UINTTL -> VMSTATE_UINT32
There is no functional change (the migration stream is not modified).
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010070702.51484-14-philmd@linaro.org>
---
target/openrisc/cpu.h | 22 +-
target/openrisc/helper.h | 8 +-
target/openrisc/fpu_helper.c | 8 +-
target/openrisc/machine.c | 16 +-
target/openrisc/sys_helper.c | 5 +-
target/openrisc/translate.c | 391 ++++++++++++++++++-----------------
6 files changed, 228 insertions(+), 222 deletions(-)
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index 87201365a91..c8e2827930b 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -224,20 +224,20 @@ typedef struct CPUOpenRISCTLBContext {
#endif
typedef struct CPUArchState {
- target_ulong shadow_gpr[16][32]; /* Shadow registers */
+ uint32_t shadow_gpr[16][32]; /* Shadow registers */
- target_ulong pc; /* Program counter */
- target_ulong ppc; /* Prev PC */
- target_ulong jmp_pc; /* Jump PC */
+ uint32_t pc; /* Program counter */
+ uint32_t ppc; /* Prev PC */
+ uint32_t jmp_pc; /* Jump PC */
uint64_t mac; /* Multiply registers MACHI:MACLO */
- target_ulong epcr; /* Exception PC register */
- target_ulong eear; /* Exception EA register */
+ uint32_t epcr; /* Exception PC register */
+ uint32_t eear; /* Exception EA register */
- target_ulong sr_f; /* the SR_F bit, values 0, 1. */
- target_ulong sr_cy; /* the SR_CY bit, values 0, 1. */
- target_long sr_ov; /* the SR_OV bit (in the sign bit only) */
+ uint32_t sr_f; /* the SR_F bit, values 0, 1. */
+ uint32_t sr_cy; /* the SR_CY bit, values 0, 1. */
+ int32_t sr_ov; /* the SR_OV bit (in the sign bit only) */
uint32_t sr; /* Supervisor register, without SR_{F,CY,OV} */
uint32_t esr; /* Exception supervisor register */
uint32_t evbar; /* Exception vector base address register */
@@ -245,8 +245,8 @@ typedef struct CPUArchState {
uint32_t fpcsr; /* Float register */
float_status fp_status;
- target_ulong lock_addr;
- target_ulong lock_value;
+ uint32_t lock_addr;
+ uint32_t lock_value;
uint32_t dflag; /* In delay slot (boolean) */
diff --git a/target/openrisc/helper.h b/target/openrisc/helper.h
index d847814a28d..e0a8d402271 100644
--- a/target/openrisc/helper.h
+++ b/target/openrisc/helper.h
@@ -47,8 +47,8 @@ FOP_CALC(rem)
#undef FOP_CALC
#define FOP_CMP(op) \
-DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, tl, env, i32, i32) \
-DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, i32, env, i64, i64)
FOP_CMP(eq)
FOP_CMP(lt)
FOP_CMP(le)
@@ -62,5 +62,5 @@ FOP_CMP(ult)
DEF_HELPER_FLAGS_1(rfe, 0, void, env)
/* sys */
-DEF_HELPER_FLAGS_3(mtspr, 0, void, env, tl, tl)
-DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, tl, env, tl, tl)
+DEF_HELPER_FLAGS_3(mtspr, 0, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, i32, env, i32, i32)
diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c
index dba997255c6..39b6195dd7d 100644
--- a/target/openrisc/fpu_helper.c
+++ b/target/openrisc/fpu_helper.c
@@ -146,10 +146,10 @@ uint32_t helper_float_madd_s(CPUOpenRISCState *env, uint32_t a,
#define FLOAT_CMP(name, impl) \
-target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
uint64_t fdt0, uint64_t fdt1) \
{ return float64_ ## impl(fdt0, fdt1, &env->fp_status); } \
-target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
uint32_t fdt0, uint32_t fdt1) \
{ return float32_ ## impl(fdt0, fdt1, &env->fp_status); }
@@ -160,13 +160,13 @@ FLOAT_CMP(un, unordered_quiet)
#undef FLOAT_CMP
#define FLOAT_UCMP(name, expr) \
-target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
uint64_t fdt0, uint64_t fdt1) \
{ \
FloatRelation r = float64_compare_quiet(fdt0, fdt1, &env->fp_status); \
return expr; \
} \
-target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
uint32_t fdt0, uint32_t fdt1) \
{ \
FloatRelation r = float32_compare_quiet(fdt0, fdt1, &env->fp_status); \
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
index fa054e528bd..f2853674f0f 100644
--- a/target/openrisc/machine.c
+++ b/target/openrisc/machine.c
@@ -72,14 +72,14 @@ static const VMStateDescription vmstate_env = {
.version_id = 6,
.minimum_version_id = 6,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32),
- VMSTATE_UINTTL(pc, CPUOpenRISCState),
- VMSTATE_UINTTL(ppc, CPUOpenRISCState),
- VMSTATE_UINTTL(jmp_pc, CPUOpenRISCState),
- VMSTATE_UINTTL(lock_addr, CPUOpenRISCState),
- VMSTATE_UINTTL(lock_value, CPUOpenRISCState),
- VMSTATE_UINTTL(epcr, CPUOpenRISCState),
- VMSTATE_UINTTL(eear, CPUOpenRISCState),
+ VMSTATE_UINT32_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32),
+ VMSTATE_UINT32(pc, CPUOpenRISCState),
+ VMSTATE_UINT32(ppc, CPUOpenRISCState),
+ VMSTATE_UINT32(jmp_pc, CPUOpenRISCState),
+ VMSTATE_UINT32(lock_addr, CPUOpenRISCState),
+ VMSTATE_UINT32(lock_value, CPUOpenRISCState),
+ VMSTATE_UINT32(epcr, CPUOpenRISCState),
+ VMSTATE_UINT32(eear, CPUOpenRISCState),
/* Save the architecture value of the SR, not the internally
expanded version. Since this architecture value does not
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index ad59939db3b..7ad908b6322 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -40,7 +40,7 @@ static inline bool is_user(CPUOpenRISCState *env)
#endif
}
-void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
+void HELPER(mtspr)(CPUOpenRISCState *env, uint32_t spr, uint32_t rb)
{
OpenRISCCPU *cpu = env_archcpu(env);
#ifndef CONFIG_USER_ONLY
@@ -213,8 +213,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
#endif
}
-target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
- target_ulong spr)
+uint32_t HELPER(mfspr)(CPUOpenRISCState *env, uint32_t rd, uint32_t spr)
{
OpenRISCCPU *cpu = env_archcpu(env);
#ifndef CONFIG_USER_ONLY
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index beb4a546fd7..6fa4d6cfa70 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -54,9 +54,9 @@ typedef struct DisasContext {
vaddr jmp_pc_imm;
/* The temporary corresponding to register 0 for this compilation. */
- TCGv R0;
+ TCGv_i32 R0;
/* The constant zero. */
- TCGv zero;
+ TCGv_i32 zero;
} DisasContext;
static inline MemOp mo_endian(DisasContext *dc)
@@ -77,16 +77,16 @@ static inline bool is_user(DisasContext *dc)
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"
-static TCGv cpu_sr;
-static TCGv cpu_regs[32];
-static TCGv cpu_pc;
-static TCGv jmp_pc; /* l.jr/l.jalr temp pc */
-static TCGv cpu_ppc;
-static TCGv cpu_sr_f; /* bf/bnf, F flag taken */
-static TCGv cpu_sr_cy; /* carry (unsigned overflow) */
-static TCGv cpu_sr_ov; /* signed overflow */
-static TCGv cpu_lock_addr;
-static TCGv cpu_lock_value;
+static TCGv_i32 cpu_sr;
+static TCGv_i32 cpu_regs[32];
+static TCGv_i32 cpu_pc;
+static TCGv_i32 jmp_pc; /* l.jr/l.jalr temp pc */
+static TCGv_i32 cpu_ppc;
+static TCGv_i32 cpu_sr_f; /* bf/bnf, F flag taken */
+static TCGv_i32 cpu_sr_cy; /* carry (unsigned overflow) */
+static TCGv_i32 cpu_sr_ov; /* signed overflow */
+static TCGv_i32 cpu_lock_addr;
+static TCGv_i32 cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac; /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;
@@ -101,27 +101,27 @@ void openrisc_translate_init(void)
};
int i;
- cpu_sr = tcg_global_mem_new(tcg_env,
+ cpu_sr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr), "sr");
cpu_dflag = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, dflag),
"dflag");
- cpu_pc = tcg_global_mem_new(tcg_env,
+ cpu_pc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, pc), "pc");
- cpu_ppc = tcg_global_mem_new(tcg_env,
+ cpu_ppc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, ppc), "ppc");
- jmp_pc = tcg_global_mem_new(tcg_env,
+ jmp_pc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
- cpu_sr_f = tcg_global_mem_new(tcg_env,
+ cpu_sr_f = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr_f), "sr_f");
- cpu_sr_cy = tcg_global_mem_new(tcg_env,
+ cpu_sr_cy = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
- cpu_sr_ov = tcg_global_mem_new(tcg_env,
+ cpu_sr_ov = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
- cpu_lock_addr = tcg_global_mem_new(tcg_env,
+ cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, lock_addr),
"lock_addr");
- cpu_lock_value = tcg_global_mem_new(tcg_env,
+ cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, lock_value),
"lock_value");
fpcsr = tcg_global_mem_new_i32(tcg_env,
@@ -131,7 +131,7 @@ void openrisc_translate_init(void)
offsetof(CPUOpenRISCState, mac),
"mac");
for (i = 0; i < 32; i++) {
- cpu_regs[i] = tcg_global_mem_new(tcg_env,
+ cpu_regs[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState,
shadow_gpr[0][i]),
regnames[i]);
@@ -145,7 +145,7 @@ static void gen_exception(DisasContext *dc, unsigned int excp)
static void gen_illegal_exception(DisasContext *dc)
{
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_ILLEGAL);
dc->base.is_jmp = DISAS_NORETURN;
}
@@ -165,7 +165,7 @@ static bool check_of64a32s(DisasContext *dc)
return dc->cpucfgr & CPUCFGR_OF64A32S;
}
-static TCGv cpu_R(DisasContext *dc, int reg)
+static TCGv_i32 cpu_R(DisasContext *dc, int reg)
{
if (reg == 0) {
return dc->R0;
@@ -206,126 +206,133 @@ static void gen_ove_cyov(DisasContext *dc)
}
}
-static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_add(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
- TCGv res = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 res = tcg_temp_new_i32();
- tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero);
- tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
- tcg_gen_xor_tl(t0, res, srcb);
- tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
+ tcg_gen_add2_i32(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero);
+ tcg_gen_xor_i32(cpu_sr_ov, srca, srcb);
+ tcg_gen_xor_i32(t0, res, srcb);
+ tcg_gen_andc_i32(cpu_sr_ov, t0, cpu_sr_ov);
- tcg_gen_mov_tl(dest, res);
+ tcg_gen_mov_i32(dest, res);
gen_ove_cyov(dc);
}
-static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_addc(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
- TCGv res = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 res = tcg_temp_new_i32();
- tcg_gen_addcio_tl(res, cpu_sr_cy, srca, srcb, cpu_sr_cy);
- tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
- tcg_gen_xor_tl(t0, res, srcb);
- tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
+ tcg_gen_addcio_i32(res, cpu_sr_cy, srca, srcb, cpu_sr_cy);
+ tcg_gen_xor_i32(cpu_sr_ov, srca, srcb);
+ tcg_gen_xor_i32(t0, res, srcb);
+ tcg_gen_andc_i32(cpu_sr_ov, t0, cpu_sr_ov);
- tcg_gen_mov_tl(dest, res);
+ tcg_gen_mov_i32(dest, res);
gen_ove_cyov(dc);
}
-static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_sub(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv res = tcg_temp_new();
+ TCGv_i32 res = tcg_temp_new_i32();
- tcg_gen_sub_tl(res, srca, srcb);
- tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
- tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
- tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
+ tcg_gen_sub_i32(res, srca, srcb);
+ tcg_gen_xor_i32(cpu_sr_cy, srca, srcb);
+ tcg_gen_xor_i32(cpu_sr_ov, res, srcb);
+ tcg_gen_and_i32(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
- tcg_gen_mov_tl(dest, res);
+ tcg_gen_mov_i32(dest, res);
gen_ove_cyov(dc);
}
-static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_mul(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
- tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
- tcg_gen_negsetcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
+ tcg_gen_muls2_i32(dest, cpu_sr_ov, srca, srcb);
+ tcg_gen_sari_i32(t0, dest, TARGET_LONG_BITS - 1);
+ tcg_gen_negsetcond_i32(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
gen_ove_ov(dc);
}
-static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_mulu(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
+ tcg_gen_muls2_i32(dest, cpu_sr_cy, srca, srcb);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
gen_ove_cy(dc);
}
-static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_div(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
/* The result of divide-by-zero is undefined.
Suppress the host-side exception by dividing by 1. */
- tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
- tcg_gen_div_tl(dest, srca, t0);
+ tcg_gen_or_i32(t0, srcb, cpu_sr_ov);
+ tcg_gen_div_i32(dest, srca, t0);
- tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
+ tcg_gen_neg_i32(cpu_sr_ov, cpu_sr_ov);
gen_ove_ov(dc);
}
-static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+static void gen_divu(DisasContext *dc, TCGv_i32 dest,
+ TCGv_i32 srca, TCGv_i32 srcb)
{
- TCGv t0 = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
/* The result of divide-by-zero is undefined.
Suppress the host-side exception by dividing by 1. */
- tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
- tcg_gen_divu_tl(dest, srca, t0);
+ tcg_gen_or_i32(t0, srcb, cpu_sr_cy);
+ tcg_gen_divu_i32(dest, srca, t0);
gen_ove_cy(dc);
}
-static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_muld(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_ext_tl_i64(t1, srca);
- tcg_gen_ext_tl_i64(t2, srcb);
+ tcg_gen_ext_i32_i64(t1, srca);
+ tcg_gen_ext_i32_i64(t2, srcb);
tcg_gen_mul_i64(cpu_mac, t1, t2);
- tcg_gen_movi_tl(cpu_sr_ov, 0);
+ tcg_gen_movi_i32(cpu_sr_ov, 0);
}
-static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_muldu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_extu_tl_i64(t1, srca);
- tcg_gen_extu_tl_i64(t2, srcb);
+ tcg_gen_extu_i32_i64(t1, srca);
+ tcg_gen_extu_i32_i64(t2, srcb);
tcg_gen_mul_i64(cpu_mac, t1, t2);
- tcg_gen_movi_tl(cpu_sr_cy, 0);
+ tcg_gen_movi_i32(cpu_sr_cy, 0);
}
-static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_mac(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_ext_tl_i64(t1, srca);
- tcg_gen_ext_tl_i64(t2, srcb);
+ tcg_gen_ext_i32_i64(t1, srca);
+ tcg_gen_ext_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during addition stage. */
@@ -339,13 +346,13 @@ static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
gen_ove_ov(dc);
}
-static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_macu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_extu_tl_i64(t1, srca);
- tcg_gen_extu_tl_i64(t2, srcb);
+ tcg_gen_extu_i32_i64(t1, srca);
+ tcg_gen_extu_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during addition stage. */
@@ -356,13 +363,13 @@ static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
gen_ove_cy(dc);
}
-static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_msb(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_ext_tl_i64(t1, srca);
- tcg_gen_ext_tl_i64(t2, srcb);
+ tcg_gen_ext_i32_i64(t1, srca);
+ tcg_gen_ext_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during subtraction stage. */
@@ -380,13 +387,13 @@ static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
gen_ove_ov(dc);
}
-static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
+static void gen_msbu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_extu_tl_i64(t1, srca);
- tcg_gen_extu_tl_i64(t2, srcb);
+ tcg_gen_extu_i32_i64(t1, srca);
+ tcg_gen_extu_i32_i64(t2, srcb);
tcg_gen_mul_i64(t1, t1, t2);
/* Note that overflow is only computed during subtraction stage. */
@@ -421,84 +428,84 @@ static bool trans_l_sub(DisasContext *dc, arg_dab *a)
static bool trans_l_and(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_and_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_and_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_or(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_or_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_or_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_xor(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_xor_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_xor_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sll(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shl_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_shl_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_srl(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_shr_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sra(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_sar_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_sar_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_ror(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_rotr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ tcg_gen_rotr_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_exths(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext16s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext16s_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_extbs(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext8s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext8s_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_exthz(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext16u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext16u_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_extbz(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ext8u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ tcg_gen_ext8u_i32(cpu_R(dc, a->d), cpu_R(dc, a->a));
return true;
}
static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
{
check_r0_write(dc, a->d);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero,
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
@@ -506,16 +513,16 @@ static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
static bool trans_l_ff1(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ctzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), -1);
- tcg_gen_addi_tl(cpu_R(dc, a->d), cpu_R(dc, a->d), 1);
+ tcg_gen_ctzi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), -1);
+ tcg_gen_addi_i32(cpu_R(dc, a->d), cpu_R(dc, a->d), 1);
return true;
}
static bool trans_l_fl1(DisasContext *dc, arg_da *a)
{
check_r0_write(dc, a->d);
- tcg_gen_clzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS);
- tcg_gen_subfi_tl(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d));
+ tcg_gen_clzi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS);
+ tcg_gen_subfi_i32(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d));
return true;
}
@@ -563,7 +570,7 @@ static bool trans_l_j(DisasContext *dc, arg_l_j *a)
{
vaddr tmp_pc = dc->base.pc_next + a->n * 4;
- tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ tcg_gen_movi_i32(jmp_pc, tmp_pc);
dc->jmp_pc_imm = tmp_pc;
dc->delayed_branch = 2;
return true;
@@ -574,10 +581,10 @@ static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
vaddr tmp_pc = dc->base.pc_next + a->n * 4;
vaddr ret_pc = dc->base.pc_next + 8;
- tcg_gen_movi_tl(cpu_regs[9], ret_pc);
+ tcg_gen_movi_i32(cpu_regs[9], ret_pc);
/* Optimize jal being used to load the PC for PIC. */
if (tmp_pc != ret_pc) {
- tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ tcg_gen_movi_i32(jmp_pc, tmp_pc);
dc->jmp_pc_imm = tmp_pc;
dc->delayed_branch = 2;
}
@@ -587,10 +594,10 @@ static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
vaddr tmp_pc = dc->base.pc_next + a->n * 4;
- TCGv t_next = tcg_constant_tl(dc->base.pc_next + 8);
- TCGv t_true = tcg_constant_tl(tmp_pc);
+ TCGv_i32 t_next = tcg_constant_i32(dc->base.pc_next + 8);
+ TCGv_i32 t_true = tcg_constant_i32(tmp_pc);
- tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next);
+ tcg_gen_movcond_i32(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next);
dc->delayed_branch = 2;
}
@@ -608,43 +615,43 @@ static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
{
- tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
+ tcg_gen_mov_i32(jmp_pc, cpu_R(dc, a->b));
dc->delayed_branch = 2;
return true;
}
static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
{
- tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
- tcg_gen_movi_tl(cpu_regs[9], dc->base.pc_next + 8);
+ tcg_gen_mov_i32(jmp_pc, cpu_R(dc, a->b));
+ tcg_gen_movi_i32(cpu_regs[9], dc->base.pc_next + 8);
dc->delayed_branch = 2;
return true;
}
static bool trans_l_lwa(DisasContext *dc, arg_load *a)
{
- TCGv ea;
+ TCGv_i32 ea;
check_r0_write(dc, a->d);
- ea = tcg_temp_new();
- tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx,
- mo_endian(dc) | MO_UL);
- tcg_gen_mov_tl(cpu_lock_addr, ea);
- tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
+ ea = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_ld_i32(cpu_R(dc, a->d), ea, dc->mem_idx,
+ mo_endian(dc) | MO_UL);
+ tcg_gen_mov_i32(cpu_lock_addr, ea);
+ tcg_gen_mov_i32(cpu_lock_value, cpu_R(dc, a->d));
return true;
}
static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
{
- TCGv ea;
+ TCGv_i32 ea;
mop |= mo_endian(dc);
check_r0_write(dc, a->d);
- ea = tcg_temp_new();
- tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop);
+ ea = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_ld_i32(cpu_R(dc, a->d), ea, dc->mem_idx, mop);
}
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
@@ -685,40 +692,40 @@ static bool trans_l_lhs(DisasContext *dc, arg_load *a)
static bool trans_l_swa(DisasContext *dc, arg_store *a)
{
- TCGv ea, val;
+ TCGv_i32 ea, val;
TCGLabel *lab_fail, *lab_done;
- ea = tcg_temp_new();
- tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
+ ea = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i);
lab_fail = gen_new_label();
lab_done = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
+ tcg_gen_brcond_i32(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
- val = tcg_temp_new();
- tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
+ val = tcg_temp_new_i32();
+ tcg_gen_atomic_cmpxchg_i32(val, cpu_lock_addr, cpu_lock_value,
cpu_R(dc, a->b), dc->mem_idx,
mo_endian(dc) | MO_UL);
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
tcg_gen_br(lab_done);
gen_set_label(lab_fail);
- tcg_gen_movi_tl(cpu_sr_f, 0);
+ tcg_gen_movi_i32(cpu_sr_f, 0);
gen_set_label(lab_done);
- tcg_gen_movi_tl(cpu_lock_addr, -1);
+ tcg_gen_movi_i32(cpu_lock_addr, -1);
return true;
}
static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
{
- TCGv t0 = tcg_temp_new();
+ TCGv_i32 t0 = tcg_temp_new_i32();
mop |= mo_endian(dc);
- tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i);
- tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
+ tcg_gen_addi_i32(t0, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_st_i32(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
}
static bool trans_l_sw(DisasContext *dc, arg_store *a)
@@ -760,75 +767,75 @@ static bool trans_l_adrp(DisasContext *dc, arg_l_adrp *a)
static bool trans_l_addi(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_addic(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_muli(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
{
- gen_mac(dc, cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ gen_mac(dc, cpu_R(dc, a->a), tcg_constant_i32(a->i));
return true;
}
static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
{
check_r0_write(dc, a->d);
- tcg_gen_andi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
+ tcg_gen_andi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
return true;
}
static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
{
check_r0_write(dc, a->d);
- tcg_gen_ori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
+ tcg_gen_ori_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
return true;
}
static bool trans_l_xori(DisasContext *dc, arg_rri *a)
{
check_r0_write(dc, a->d);
- tcg_gen_xori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i);
+ tcg_gen_xori_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
{
- TCGv spr = tcg_temp_new();
+ TCGv_i32 spr = tcg_temp_new_i32();
check_r0_write(dc, a->d);
if (translator_io_start(&dc->base)) {
if (dc->delayed_branch) {
- tcg_gen_mov_tl(cpu_pc, jmp_pc);
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_mov_i32(cpu_pc, jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
} else {
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
}
dc->base.is_jmp = DISAS_EXIT;
}
- tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
+ tcg_gen_ori_i32(spr, cpu_R(dc, a->a), a->k);
gen_helper_mfspr(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->d), spr);
return true;
}
static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
{
- TCGv spr = tcg_temp_new();
+ TCGv_i32 spr = tcg_temp_new_i32();
translator_io_start(&dc->base);
@@ -839,14 +846,14 @@ static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
* of the cpu state first, allowing it to be overwritten.
*/
if (dc->delayed_branch) {
- tcg_gen_mov_tl(cpu_pc, jmp_pc);
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_mov_i32(cpu_pc, jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
} else {
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
}
dc->base.is_jmp = DISAS_EXIT;
- tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
+ tcg_gen_ori_i32(spr, cpu_R(dc, a->a), a->k);
gen_helper_mtspr(tcg_env, spr, cpu_R(dc, a->b));
return true;
}
@@ -878,7 +885,7 @@ static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
static bool trans_l_slli(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shli_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_shli_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -886,7 +893,7 @@ static bool trans_l_slli(DisasContext *dc, arg_dal *a)
static bool trans_l_srli(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_shri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_shri_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -894,7 +901,7 @@ static bool trans_l_srli(DisasContext *dc, arg_dal *a)
static bool trans_l_srai(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_sari_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_sari_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -902,7 +909,7 @@ static bool trans_l_srai(DisasContext *dc, arg_dal *a)
static bool trans_l_rori(DisasContext *dc, arg_dal *a)
{
check_r0_write(dc, a->d);
- tcg_gen_rotri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ tcg_gen_rotri_i32(cpu_R(dc, a->d), cpu_R(dc, a->a),
a->l & (TARGET_LONG_BITS - 1));
return true;
}
@@ -910,7 +917,7 @@ static bool trans_l_rori(DisasContext *dc, arg_dal *a)
static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
{
check_r0_write(dc, a->d);
- tcg_gen_movi_tl(cpu_R(dc, a->d), a->k << 16);
+ tcg_gen_movi_i32(cpu_R(dc, a->d), a->k << 16);
return true;
}
@@ -924,137 +931,137 @@ static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_LEU, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f,
+ tcg_gen_setcond_i32(TCG_COND_LT, cpu_sr_f,
cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
{
- tcg_gen_setcond_tl(TCG_COND_LE,
+ tcg_gen_setcond_i32(TCG_COND_LE,
cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b));
return true;
}
static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
{
- tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ tcg_gen_setcondi_i32(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
return true;
}
static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
{
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_SYSCALL);
dc->base.is_jmp = DISAS_NORETURN;
return true;
@@ -1062,7 +1069,7 @@ static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
{
- tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_TRAP);
dc->base.is_jmp = DISAS_NORETURN;
return true;
@@ -1096,7 +1103,7 @@ static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
}
static bool do_fp2(DisasContext *dc, arg_da *a,
- void (*fn)(TCGv, TCGv_env, TCGv))
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i32))
{
if (!check_of32s(dc)) {
return false;
@@ -1108,7 +1115,7 @@ static bool do_fp2(DisasContext *dc, arg_da *a,
}
static bool do_fp3(DisasContext *dc, arg_dab *a,
- void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
if (!check_of32s(dc)) {
return false;
@@ -1120,7 +1127,7 @@ static bool do_fp3(DisasContext *dc, arg_dab *a,
}
static bool do_fpcmp(DisasContext *dc, arg_ab *a,
- void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32),
bool inv, bool swap)
{
if (!check_of32s(dc)) {
@@ -1132,7 +1139,7 @@ static bool do_fpcmp(DisasContext *dc, arg_ab *a,
fn(cpu_sr_f, tcg_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
}
if (inv) {
- tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
+ tcg_gen_xori_i32(cpu_sr_f, cpu_sr_f, 1);
}
gen_helper_update_fpcsr(tcg_env);
return true;
@@ -1325,7 +1332,7 @@ static bool do_dp2(DisasContext *dc, arg_da_pair *a,
}
static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
- void (*fn)(TCGv, TCGv_env, TCGv_i64, TCGv_i64),
+ void (*fn)(TCGv_i32, TCGv_env, TCGv_i64, TCGv_i64),
bool inv, bool swap)
{
TCGv_i64 t0, t1;
@@ -1347,7 +1354,7 @@ static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
}
if (inv) {
- tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
+ tcg_gen_xori_i32(cpu_sr_f, cpu_sr_f, 1);
}
gen_helper_update_fpcsr(tcg_env);
return true;
@@ -1532,7 +1539,7 @@ static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
/* Allow the TCG optimizer to see that R0 == 0,
when it's true, which is the common case. */
- dc->zero = tcg_constant_tl(0);
+ dc->zero = tcg_constant_i32(0);
if (dc->tb_flags & TB_FLAGS_R0_0) {
dc->R0 = dc->zero;
} else {
@@ -1588,32 +1595,32 @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
/* For DISAS_TOO_MANY, jump to the next insn. */
jmp_dest = dc->base.pc_next;
- tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);
+ tcg_gen_movi_i32(cpu_ppc, jmp_dest - 4);
switch (dc->base.is_jmp) {
case DISAS_JUMP:
jmp_dest = dc->jmp_pc_imm;
if (jmp_dest == -1) {
/* The jump destination is indirect/computed; use jmp_pc. */
- tcg_gen_mov_tl(cpu_pc, jmp_pc);
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_mov_i32(cpu_pc, jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
tcg_gen_lookup_and_goto_ptr();
break;
}
/* The jump destination is direct; use jmp_pc_imm.
However, we will have stored into jmp_pc as well;
we know now that it wasn't needed. */
- tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_discard_i32(jmp_pc);
/* fallthru */
case DISAS_TOO_MANY:
if (translator_use_goto_tb(&dc->base, jmp_dest)) {
tcg_gen_goto_tb(0);
- tcg_gen_movi_tl(cpu_pc, jmp_dest);
+ tcg_gen_movi_i32(cpu_pc, jmp_dest);
tcg_gen_exit_tb(dc->base.tb, 0);
break;
}
- tcg_gen_movi_tl(cpu_pc, jmp_dest);
+ tcg_gen_movi_i32(cpu_pc, jmp_dest);
tcg_gen_lookup_and_goto_ptr();
break;
--
2.51.0
* [PULL 40/75] target/riscv: Use 32 bits for misa extensions
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (38 preceding siblings ...)
2025-10-16 12:14 ` [PULL 39/75] target/openrisc: Replace target_ulong -> uint32_t Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 41/75] target/riscv: Replace HOST_BIG_ENDIAN #ifdef with if() check Philippe Mathieu-Daudé
` (34 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
From: Anton Johansson <anjo@rev.ng>
uint32_t is already in use in most places storing misa extensions, such
as CPUArchState::misa_exts, RISCVCPUProfile::misa_exts,
RISCVImpliedExtsRule::implied_misa_exts. Additionally, the field is
already migrated as uint32_t.
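As a rough standalone illustration of why 32 bits are enough (this
sketch assumes BIT() expands to (1UL << n), as in QEMU's
"qemu/bitops.h"; it is not code from this patch):

#include <stdint.h>
#include <stdbool.h>

#define BIT(nr) (1UL << (nr))   /* assumed QEMU definition */
#define RV(x)   BIT(x - 'A')

int main(void)
{
    /* Every misa letter 'A'..'Z' maps to bit 0..25, so any mask of
     * misa extensions fits in a uint32_t. */
    uint32_t misa_ext = RV('I') | RV('M') | RV('A') | RV('C');
    bool has_c = (misa_ext & RV('C')) != 0;   /* riscv_has_ext()-style test */

    return has_c ? 0 : 1;
}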
Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Message-ID: <20251001073306.28573-2-anjo@rev.ng>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
target/riscv/cpu.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 2c2266415ec..4c13012442d 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -50,7 +50,7 @@ typedef struct CPUArchState CPURISCVState;
*/
#define RISCV_UW2_ALWAYS_STORE_AMO 1
-#define RV(x) ((target_ulong)1 << (x - 'A'))
+#define RV(x) BIT(x - 'A')
/*
* Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
@@ -582,7 +582,7 @@ struct RISCVCPUClass {
RISCVCPUDef *def;
};
-static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
+static inline int riscv_has_ext(CPURISCVState *env, uint32_t ext)
{
return (env->misa_ext & ext) != 0;
}
--
2.51.0
* [PULL 41/75] target/riscv: Replace HOST_BIG_ENDIAN #ifdef with if() check
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (39 preceding siblings ...)
2025-10-16 12:14 ` [PULL 40/75] target/riscv: Use 32 bits for misa extensions Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 42/75] target/rx: Replace target_ulong -> vaddr for translator API uses Philippe Mathieu-Daudé
` (33 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Replace preprocessor-time #ifdef with a compile-time check
to ensure all code paths are built and tested. This reduces
build-time configuration complexity and improves code
maintainability.
No functional change intended.
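A minimal standalone sketch of the pattern (assuming HOST_BIG_ENDIAN
is always defined to 0 or 1, which is how QEMU defines it): both
branches are now parsed and compiled, and the constant condition lets
the compiler discard the dead one.

#include <stdio.h>

#ifndef HOST_BIG_ENDIAN
#define HOST_BIG_ENDIAN 0   /* assumption for this standalone sketch */
#endif

static void copy_path(void)
{
    if (HOST_BIG_ENDIAN) {
        puts("element-by-element path");   /* always compiled */
    } else {
        puts("memcpy fast path");          /* always compiled */
    }
}

int main(void)
{
    copy_path();
    return 0;
}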
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20251010134226.72221-14-philmd@linaro.org>
---
target/riscv/vector_helper.c | 32 ++++++++++++-------------
target/riscv/insn_trans/trans_rvv.c.inc | 16 ++++++-------
2 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 41ea2231067..2de3358ee86 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -235,26 +235,26 @@ vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
void *vd, uint32_t evl, uint32_t reg_start, void *host,
uint32_t esz, bool is_load)
{
-#if HOST_BIG_ENDIAN
- for (; reg_start < evl; reg_start++, host += esz) {
- ldst_host(vd, reg_start, host);
- }
-#else
- if (esz == 1) {
- uint32_t byte_offset = reg_start * esz;
- uint32_t size = (evl - reg_start) * esz;
-
- if (is_load) {
- memcpy(vd + byte_offset, host, size);
- } else {
- memcpy(host, vd + byte_offset, size);
- }
- } else {
+ if (HOST_BIG_ENDIAN) {
for (; reg_start < evl; reg_start++, host += esz) {
ldst_host(vd, reg_start, host);
}
+ } else {
+ if (esz == 1) {
+ uint32_t byte_offset = reg_start * esz;
+ uint32_t size = (evl - reg_start) * esz;
+
+ if (is_load) {
+ memcpy(vd + byte_offset, host, size);
+ } else {
+ memcpy(host, vd + byte_offset, size);
+ }
+ } else {
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+ }
}
-#endif
}
static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index f4b5460340e..2a487179f63 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3351,19 +3351,19 @@ static void load_element(TCGv_i64 dest, TCGv_ptr base,
/* offset of the idx element with base register r */
static uint32_t endian_ofs(DisasContext *s, int r, int idx)
{
-#if HOST_BIG_ENDIAN
- return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
-#else
- return vreg_ofs(s, r) + (idx << s->sew);
-#endif
+ if (HOST_BIG_ENDIAN) {
+ return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
+ } else {
+ return vreg_ofs(s, r) + (idx << s->sew);
+ }
}
/* adjust the index according to the endian */
static void endian_adjust(TCGv_i32 ofs, int sew)
{
-#if HOST_BIG_ENDIAN
- tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
-#endif
+ if (HOST_BIG_ENDIAN) {
+ tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
+ }
}
/* Load idx >= VLMAX ? 0 : vreg[idx] */
--
2.51.0
* [PULL 42/75] target/rx: Replace target_ulong -> vaddr for translator API uses
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (40 preceding siblings ...)
2025-10-16 12:14 ` [PULL 41/75] target/riscv: Replace HOST_BIG_ENDIAN #ifdef with if() check Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:14 ` [PULL 43/75] target/rx: Use MemOp type in gen_ld[u]() and gen_st() Philippe Mathieu-Daudé
` (32 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
Since commit b1c09220b4c ("accel/tcg: Replace target_ulong with
vaddr in translator_*()"), the API takes a vaddr argument, not
target_ulong. Update the two callers.
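For context, a standalone sketch of the width difference (the
typedefs below are approximations for illustration only, not QEMU's
actual headers):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;          /* assumed: QEMU's target-independent type */
typedef uint32_t target_ulong;   /* rx is a 32-bit target */

int main(void)
{
    /* The translator API traffics in the wider, target-independent
     * type, so the callers' local variables should match it. */
    printf("vaddr: %zu bytes, target_ulong: %zu bytes\n",
           sizeof(vaddr), sizeof(target_ulong));
    return 0;
}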
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-2-philmd@linaro.org>
---
target/rx/translate.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 5fc589c706b..1c911cd9a67 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -85,7 +85,7 @@ static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
static uint32_t li(DisasContext *ctx, int sz)
{
- target_ulong addr;
+ vaddr addr;
uint32_t tmp;
CPURXState *env = ctx->env;
addr = ctx->base.pc_next;
@@ -147,8 +147,7 @@ void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
-static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx,
- target_ulong dest)
+static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
{
if (translator_use_goto_tb(&dc->base, dest)) {
tcg_gen_goto_tb(tb_slot_idx);
--
2.51.0
* [PULL 43/75] target/rx: Use MemOp type in gen_ld[u]() and gen_st()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (41 preceding siblings ...)
2025-10-16 12:14 ` [PULL 42/75] target/rx: Replace target_ulong -> vaddr for translator API uses Philippe Mathieu-Daudé
@ 2025-10-16 12:14 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 44/75] target/rx: Propagate DisasContext to generated helpers Philippe Mathieu-Daudé
` (31 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:14 UTC (permalink / raw)
To: qemu-devel
The @size argument is of MemOp type; all callers already pass MemOp values.
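A standalone sketch of what the MemOp size encoding conveys (this
assumes the size field is log2 of the access width in bytes, i.e.
MO_8 = 0, MO_16 = 1, MO_32 = 2, as in QEMU's memop.h; it is not code
from this patch):

#include <stdio.h>

typedef enum { MO_8 = 0, MO_16 = 1, MO_32 = 2 } MemOp;   /* simplified */

static unsigned access_bytes(MemOp size)
{
    return 1u << size;
}

int main(void)
{
    printf("MO_16 access touches %u bytes\n", access_bytes(MO_16));
    return 0;
}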
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-3-philmd@linaro.org>
---
target/rx/translate.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 1c911cd9a67..744edd56afc 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -161,19 +161,19 @@ static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
}
/* generic load wrapper */
-static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
+static inline void rx_gen_ld(MemOp size, TCGv reg, TCGv mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}
/* unsigned load wrapper */
-static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
+static inline void rx_gen_ldu(MemOp size, TCGv reg, TCGv mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}
/* generic store wrapper */
-static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
+static inline void rx_gen_st(MemOp size, TCGv reg, TCGv mem)
{
tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}
--
2.51.0
* [PULL 44/75] target/rx: Propagate DisasContext to generated helpers
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (42 preceding siblings ...)
2025-10-16 12:14 ` [PULL 43/75] target/rx: Use MemOp type in gen_ld[u]() and gen_st() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 45/75] target/rx: Propagate DisasContext to push() / pop() Philippe Mathieu-Daudé
` (30 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
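Threading DisasContext through these helpers appears to prepare for
deriving memory-access properties from the translation context,
notably the endianness handled by the mo_endian() helper factored out
later in this series (PULL 47). A rough sketch of the eventual call
shape, purely as an assumption mirroring the openrisc pattern earlier
in this series (not code from this patch):

/* Hypothetical shape once a ctx-aware endianness helper exists: */
static inline void rx_gen_ld(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | mo_endian(ctx));
}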
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-4-philmd@linaro.org>
---
target/rx/translate.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 744edd56afc..b314d94b018 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -1894,7 +1894,7 @@ static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
return true;
}
-static void rx_bsetm(TCGv mem, TCGv mask)
+static void rx_bsetm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
@@ -1903,7 +1903,7 @@ static void rx_bsetm(TCGv mem, TCGv mask)
rx_gen_st(MO_8, val, mem);
}
-static void rx_bclrm(TCGv mem, TCGv mask)
+static void rx_bclrm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
@@ -1912,7 +1912,7 @@ static void rx_bclrm(TCGv mem, TCGv mask)
rx_gen_st(MO_8, val, mem);
}
-static void rx_btstm(TCGv mem, TCGv mask)
+static void rx_btstm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
@@ -1922,7 +1922,7 @@ static void rx_btstm(TCGv mem, TCGv mask)
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
-static void rx_bnotm(TCGv mem, TCGv mask)
+static void rx_bnotm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
@@ -1931,17 +1931,17 @@ static void rx_bnotm(TCGv mem, TCGv mask)
rx_gen_st(MO_8, val, mem);
}
-static void rx_bsetr(TCGv reg, TCGv mask)
+static void rx_bsetr(DisasContext *ctx, TCGv reg, TCGv mask)
{
tcg_gen_or_i32(reg, reg, mask);
}
-static void rx_bclrr(TCGv reg, TCGv mask)
+static void rx_bclrr(DisasContext *ctx, TCGv reg, TCGv mask)
{
tcg_gen_andc_i32(reg, reg, mask);
}
-static inline void rx_btstr(TCGv reg, TCGv mask)
+static inline void rx_btstr(DisasContext *ctx, TCGv reg, TCGv mask)
{
TCGv t0;
t0 = tcg_temp_new();
@@ -1950,7 +1950,7 @@ static inline void rx_btstr(TCGv reg, TCGv mask)
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
-static inline void rx_bnotr(TCGv reg, TCGv mask)
+static inline void rx_bnotr(DisasContext *ctx, TCGv reg, TCGv mask)
{
tcg_gen_xor_i32(reg, reg, mask);
}
@@ -1963,7 +1963,7 @@ static inline void rx_bnotr(TCGv reg, TCGv mask)
mem = tcg_temp_new(); \
mask = tcg_constant_i32(1 << a->imm); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
- cat3(rx_, op, m)(addr, mask); \
+ cat3(rx_, op, m)(ctx, addr, mask); \
return true; \
} \
static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
@@ -1971,7 +1971,7 @@ static inline void rx_bnotr(TCGv reg, TCGv mask)
{ \
TCGv mask; \
mask = tcg_constant_i32(1 << a->imm); \
- cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
+ cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \
return true; \
} \
static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
@@ -1982,7 +1982,7 @@ static inline void rx_bnotr(TCGv reg, TCGv mask)
b = tcg_temp_new(); \
tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
- cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
+ cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \
return true; \
} \
static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
@@ -1995,7 +1995,7 @@ static inline void rx_bnotr(TCGv reg, TCGv mask)
tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
mem = tcg_temp_new(); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
- cat3(rx_, op, m)(addr, mask); \
+ cat3(rx_, op, m)(ctx, addr, mask); \
return true; \
}
--
2.51.0
* [PULL 45/75] target/rx: Propagate DisasContext to push() / pop()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (43 preceding siblings ...)
2025-10-16 12:15 ` [PULL 44/75] target/rx: Propagate DisasContext to generated helpers Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 46/75] target/rx: Propagate DisasContext to gen_ld[u]() and gen_st() Philippe Mathieu-Daudé
` (29 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-5-philmd@linaro.org>
---
target/rx/translate.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index b314d94b018..912daaf9fb6 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -414,13 +414,13 @@ static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
}
}
-static void push(TCGv val)
+static void push(DisasContext *ctx, TCGv val)
{
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
rx_gen_st(MO_32, val, cpu_sp);
}
-static void pop(TCGv ret)
+static void pop(DisasContext *ctx, TCGv ret)
{
rx_gen_ld(MO_32, ret, cpu_sp);
tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
@@ -619,7 +619,7 @@ static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
TCGv val;
val = tcg_temp_new();
- pop(val);
+ pop(ctx, val);
move_to_cr(ctx, val, a->cr);
return true;
}
@@ -634,7 +634,7 @@ static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
}
r = a->rd;
while (r <= a->rd2 && r < 16) {
- pop(cpu_regs[r++]);
+ pop(ctx, cpu_regs[r++]);
}
return true;
}
@@ -670,7 +670,7 @@ static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
TCGv val;
val = tcg_temp_new();
move_from_cr(ctx, val, a->cr, ctx->pc);
- push(val);
+ push(ctx, val);
return true;
}
@@ -685,7 +685,7 @@ static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
}
r = a->rs2;
while (r >= a->rs && r >= 0) {
- push(cpu_regs[r--]);
+ push(ctx, cpu_regs[r--]);
}
return true;
}
@@ -772,7 +772,7 @@ static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
{
tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
- pop(cpu_pc);
+ pop(ctx, cpu_pc);
ctx->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -792,9 +792,9 @@ static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
dst = a->rd;
while (dst <= a->rd2 && dst < 16) {
- pop(cpu_regs[dst++]);
+ pop(ctx, cpu_regs[dst++]);
}
- pop(cpu_pc);
+ pop(ctx, cpu_pc);
ctx->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -1585,7 +1585,7 @@ static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
static inline void rx_save_pc(DisasContext *ctx)
{
TCGv pc = tcg_constant_i32(ctx->base.pc_next);
- push(pc);
+ push(ctx, pc);
}
/* jmp rs */
@@ -1626,7 +1626,7 @@ static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
- pop(cpu_pc);
+ pop(ctx, cpu_pc);
ctx->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -2154,8 +2154,8 @@ static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
TCGv psw;
if (is_privileged(ctx, 1)) {
psw = tcg_temp_new();
- pop(cpu_pc);
- pop(psw);
+ pop(ctx, cpu_pc);
+ pop(ctx, psw);
gen_helper_set_psw_rte(tcg_env, psw);
ctx->base.is_jmp = DISAS_EXIT;
}
--
2.51.0
* [PULL 46/75] target/rx: Propagate DisasContext to gen_ld[u]() and gen_st()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (44 preceding siblings ...)
2025-10-16 12:15 ` [PULL 45/75] target/rx: Propagate DisasContext to push() / pop() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 47/75] target/rx: Factor mo_endian() helper out Philippe Mathieu-Daudé
` (28 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-6-philmd@linaro.org>
---
target/rx/translate.c | 64 +++++++++++++++++++++----------------------
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 912daaf9fb6..3c7bcc9933d 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -161,19 +161,19 @@ static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
}
/* generic load wrapper */
-static inline void rx_gen_ld(MemOp size, TCGv reg, TCGv mem)
+static inline void rx_gen_ld(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}
/* unsigned load wrapper */
-static inline void rx_gen_ldu(MemOp size, TCGv reg, TCGv mem)
+static inline void rx_gen_ldu(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}
/* generic store wrapper */
-static inline void rx_gen_st(MemOp size, TCGv reg, TCGv mem)
+static inline void rx_gen_st(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
{
tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}
@@ -417,12 +417,12 @@ static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
static void push(DisasContext *ctx, TCGv val)
{
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
- rx_gen_st(MO_32, val, cpu_sp);
+ rx_gen_st(ctx, MO_32, val, cpu_sp);
}
static void pop(DisasContext *ctx, TCGv ret)
{
- rx_gen_ld(MO_32, ret, cpu_sp);
+ rx_gen_ld(ctx, MO_32, ret, cpu_sp);
tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}
@@ -432,7 +432,7 @@ static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
TCGv mem;
mem = tcg_temp_new();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
- rx_gen_st(a->sz, cpu_regs[a->rs], mem);
+ rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem);
return true;
}
@@ -442,7 +442,7 @@ static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
TCGv mem;
mem = tcg_temp_new();
tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
- rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
@@ -463,7 +463,7 @@ static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
imm = tcg_constant_i32(a->imm);
mem = tcg_temp_new();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
- rx_gen_st(a->sz, imm, mem);
+ rx_gen_st(ctx, a->sz, imm, mem);
return true;
}
@@ -473,7 +473,7 @@ static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
TCGv mem;
mem = tcg_temp_new();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
- rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
@@ -483,7 +483,7 @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
TCGv mem;
mem = tcg_temp_new();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
- rx_gen_st(a->sz, cpu_regs[a->rs], mem);
+ rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem);
return true;
}
@@ -505,18 +505,18 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
if (a->lds == 3) {
/* mov.<bwl> rs,dsp[rd] */
addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
- rx_gen_st(a->sz, cpu_regs[a->rd], addr);
+ rx_gen_st(ctx, a->sz, cpu_regs[a->rd], addr);
} else if (a->ldd == 3) {
/* mov.<bwl> dsp[rs],rd */
addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
- rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
+ rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], addr);
} else {
/* mov.<bwl> dsp[rs],dsp[rd] */
tmp = tcg_temp_new();
addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
- rx_gen_ld(a->sz, tmp, addr);
+ rx_gen_ld(ctx, a->sz, tmp, addr);
addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
- rx_gen_st(a->sz, tmp, addr);
+ rx_gen_st(ctx, a->sz, tmp, addr);
}
return true;
}
@@ -531,7 +531,7 @@ static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
- rx_gen_st(a->sz, val, cpu_regs[a->rd]);
+ rx_gen_st(ctx, a->sz, val, cpu_regs[a->rd]);
if (a->ad == 0) {
tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -547,7 +547,7 @@ static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
- rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
+ rx_gen_ld(ctx, a->sz, val, cpu_regs[a->rd]);
if (a->ad == 0) {
tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -562,7 +562,7 @@ static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
TCGv mem;
mem = tcg_temp_new();
tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
- rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
@@ -579,7 +579,7 @@ static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
TCGv mem;
mem = tcg_temp_new();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
- rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
+ rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
}
@@ -592,7 +592,7 @@ static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
- rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
+ rx_gen_ldu(ctx, a->sz, val, cpu_regs[a->rd]);
if (a->ad == 0) {
tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -647,7 +647,7 @@ static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
val = tcg_temp_new();
tcg_gen_mov_i32(val, cpu_regs[a->rs]);
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
- rx_gen_st(a->sz, val, cpu_sp);
+ rx_gen_st(ctx, a->sz, val, cpu_sp);
return true;
}
@@ -658,9 +658,9 @@ static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
mem = tcg_temp_new();
val = tcg_temp_new();
addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
- rx_gen_ld(a->sz, val, addr);
+ rx_gen_ld(ctx, a->sz, val, addr);
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
- rx_gen_st(a->sz, val, cpu_sp);
+ rx_gen_st(ctx, a->sz, val, cpu_sp);
return true;
}
@@ -761,7 +761,7 @@ static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
mem = tcg_temp_new();
tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
- rx_gen_st(a->sz, val, addr);
+ rx_gen_st(ctx, a->sz, val, addr);
} else {
tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
}
@@ -1898,25 +1898,25 @@ static void rx_bsetm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_or_i32(val, val, mask);
- rx_gen_st(MO_8, val, mem);
+ rx_gen_st(ctx, MO_8, val, mem);
}
static void rx_bclrm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_andc_i32(val, val, mask);
- rx_gen_st(MO_8, val, mem);
+ rx_gen_st(ctx, MO_8, val, mem);
}
static void rx_btstm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_and_i32(val, val, mask);
tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
@@ -1926,9 +1926,9 @@ static void rx_bnotm(DisasContext *ctx, TCGv mem, TCGv mask)
{
TCGv val;
val = tcg_temp_new();
- rx_gen_ld(MO_8, val, mem);
+ rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_xor_i32(val, val, mask);
- rx_gen_st(MO_8, val, mem);
+ rx_gen_st(ctx, MO_8, val, mem);
}
static void rx_bsetr(DisasContext *ctx, TCGv reg, TCGv mask)
@@ -2023,9 +2023,9 @@ static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
val = tcg_temp_new();
mem = tcg_temp_new();
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
- rx_gen_ld(MO_8, val, addr);
+ rx_gen_ld(ctx, MO_8, val, addr);
bmcnd_op(val, a->cd, a->imm);
- rx_gen_st(MO_8, val, addr);
+ rx_gen_st(ctx, MO_8, val, addr);
return true;
}
--
2.51.0
* [PULL 47/75] target/rx: Factor mo_endian() helper out
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (45 preceding siblings ...)
2025-10-16 12:15 ` [PULL 46/75] target/rx: Propagate DisasContext to gen_ld[u]() and gen_st() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 48/75] target/rx: Replace MO_TE -> MO_LE Philippe Mathieu-Daudé
` (27 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-7-philmd@linaro.org>
---
target/rx/translate.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 3c7bcc9933d..6ed7ef629ff 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -72,6 +72,11 @@ static TCGv_i64 cpu_acc;
#define cpu_sp cpu_regs[0]
+static inline MemOp mo_endian(DisasContext *dc)
+{
+ return MO_TE;
+}
+
/* decoder helper */
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
int i, int n)
@@ -163,19 +168,19 @@ static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
/* generic load wrapper */
static inline void rx_gen_ld(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
{
- tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
+ tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | mo_endian(ctx));
}
/* unsigned load wrapper */
static inline void rx_gen_ldu(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
{
- tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
+ tcg_gen_qemu_ld_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* generic store wrapper */
static inline void rx_gen_st(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
{
- tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
+ tcg_gen_qemu_st_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* [ri, rb] */
@@ -226,7 +231,7 @@ static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
if (ld < 3) {
mop = mi_to_mop(mi);
addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
- tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
+ tcg_gen_qemu_ld_i32(mem, addr, 0, mop | mo_endian(ctx));
return mem;
} else {
return cpu_regs[rs];
--
2.51.0
* [PULL 48/75] target/rx: Replace MO_TE -> MO_LE
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (46 preceding siblings ...)
2025-10-16 12:15 ` [PULL 47/75] target/rx: Factor mo_endian() helper out Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 49/75] target/rx: Expand TCG register definitions for 32-bit target Philippe Mathieu-Daudé
` (26 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
We only build the RX target in little-endian byte order:
$ git grep TARGET_BIG_ENDIAN configs/targets/rx-*
$
Therefore the MO_TE definition always expands to MO_LE.
Use the latter to simplify.
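For context, MO_TE is the target-endian MemOp flag; roughly how it resolves
(a sketch paraphrased from memory of include/exec/memop.h, where it is
actually an enum value rather than a #define):
  /* Sketch only, not part of this patch: MO_TE follows the target's
   * endianness, so a target never built big-endian always gets MO_LE. */
  #if TARGET_BIG_ENDIAN
  #  define MO_TE  MO_BE
  #else
  #  define MO_TE  MO_LE   /* the RX case */
  #endif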
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-8-philmd@linaro.org>
---
target/rx/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 6ed7ef629ff..f2168243c88 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -74,7 +74,7 @@ static TCGv_i64 cpu_acc;
static inline MemOp mo_endian(DisasContext *dc)
{
- return MO_TE;
+ return MO_LE;
}
/* decoder helper */
--
2.51.0
* [PULL 49/75] target/rx: Expand TCG register definitions for 32-bit target
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (47 preceding siblings ...)
2025-10-16 12:15 ` [PULL 48/75] target/rx: Replace MO_TE -> MO_LE Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 50/75] target/rx: Un-inline various helpers Philippe Mathieu-Daudé
` (25 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The RX target is only built as 32-bit:
$ git grep TARGET_LONG_BITS configs/targets/rx-*
configs/targets/rx-softmmu.mak:5:TARGET_LONG_BITS=32
Therefore target_ulong always expands to uint32_t.
Replace and adapt the API uses mechanically:
TCGv -> TCGv_i32
tcg_temp_new -> tcg_temp_new_i32
There is no functional change.
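For context, the aliasing that makes this purely mechanical (a sketch
paraphrased from memory of the TCG headers; exact file assumed):
  /* Sketch: on a 32-bit target the generic TCGv names are plain aliases
   * of the _i32 variants, so spelling them out changes no generated ops. */
  #if TARGET_LONG_BITS == 32
  #  define TCGv            TCGv_i32
  #  define tcg_temp_new()  tcg_temp_new_i32()
  #else
  #  define TCGv            TCGv_i64
  #  define tcg_temp_new()  tcg_temp_new_i64()
  #endif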
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009151607.26278-9-philmd@linaro.org>
---
target/rx/translate.c | 326 +++++++++++++++++++++---------------------
1 file changed, 165 insertions(+), 161 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index f2168243c88..55285d8166f 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -40,8 +40,8 @@ typedef struct DisasContext {
} DisasContext;
typedef struct DisasCompare {
- TCGv value;
- TCGv temp;
+ TCGv_i32 value;
+ TCGv_i32 temp;
TCGCond cond;
} DisasCompare;
@@ -63,11 +63,11 @@ const char *rx_crname(uint8_t cr)
#define DISAS_EXIT DISAS_TARGET_2
/* global register indexes */
-static TCGv cpu_regs[16];
-static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
-static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
-static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
-static TCGv cpu_fintv, cpu_intb, cpu_pc;
+static TCGv_i32 cpu_regs[16];
+static TCGv_i32 cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
+static TCGv_i32 cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
+static TCGv_i32 cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
+static TCGv_i32 cpu_fintv, cpu_intb, cpu_pc;
static TCGv_i64 cpu_acc;
#define cpu_sp cpu_regs[0]
@@ -166,25 +166,28 @@ static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
}
/* generic load wrapper */
-static inline void rx_gen_ld(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
+static inline void rx_gen_ld(DisasContext *ctx, MemOp size,
+ TCGv_i32 reg, TCGv_i32 mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | mo_endian(ctx));
}
/* unsigned load wrapper */
-static inline void rx_gen_ldu(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
+static inline void rx_gen_ldu(DisasContext *ctx, MemOp size,
+ TCGv_i32 reg, TCGv_i32 mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* generic store wrapper */
-static inline void rx_gen_st(DisasContext *ctx, MemOp size, TCGv reg, TCGv mem)
+static inline void rx_gen_st(DisasContext *ctx, MemOp size,
+ TCGv_i32 reg, TCGv_i32 mem)
{
tcg_gen_qemu_st_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* [ri, rb] */
-static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
+static inline void rx_gen_regindex(DisasContext *ctx, TCGv_i32 mem,
int size, int ri, int rb)
{
tcg_gen_shli_i32(mem, cpu_regs[ri], size);
@@ -192,7 +195,7 @@ static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
}
/* dsp[reg] */
-static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
+static inline TCGv_i32 rx_index_addr(DisasContext *ctx, TCGv_i32 mem,
int ld, int size, int reg)
{
uint32_t dsp;
@@ -223,10 +226,10 @@ static inline MemOp mi_to_mop(unsigned mi)
}
/* load source operand */
-static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
+static inline TCGv_i32 rx_load_source(DisasContext *ctx, TCGv_i32 mem,
int ld, int mi, int rs)
{
- TCGv addr;
+ TCGv_i32 addr;
MemOp mop;
if (ld < 3) {
mop = mi_to_mop(mi);
@@ -320,7 +323,7 @@ static void psw_cond(DisasCompare *dc, uint32_t cond)
}
}
-static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
+static void move_from_cr(DisasContext *ctx, TCGv_i32 ret, int cr, uint32_t pc)
{
switch (cr) {
case 0: /* PSW */
@@ -366,7 +369,7 @@ static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
}
}
-static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
+static void move_to_cr(DisasContext *ctx, TCGv_i32 val, int cr)
{
if (cr >= 8 && !is_privileged(ctx, 0)) {
/* Some control registers can only be written in privileged mode. */
@@ -419,13 +422,13 @@ static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
}
}
-static void push(DisasContext *ctx, TCGv val)
+static void push(DisasContext *ctx, TCGv_i32 val)
{
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
rx_gen_st(ctx, MO_32, val, cpu_sp);
}
-static void pop(DisasContext *ctx, TCGv ret)
+static void pop(DisasContext *ctx, TCGv_i32 ret)
{
rx_gen_ld(ctx, MO_32, ret, cpu_sp);
tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
@@ -434,8 +437,8 @@ static void pop(DisasContext *ctx, TCGv ret)
/* mov.<bwl> rs,dsp5[rd] */
static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem);
return true;
@@ -444,8 +447,8 @@ static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
/* mov.<bwl> dsp5[rs],rd */
static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
@@ -464,9 +467,9 @@ static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
/* mov.<bwl> #imm, dsp[rd] */
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
- TCGv imm, mem;
+ TCGv_i32 imm, mem;
imm = tcg_constant_i32(a->imm);
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
rx_gen_st(ctx, a->sz, imm, mem);
return true;
@@ -475,8 +478,8 @@ static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
/* mov.<bwl> [ri,rb],rd */
static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
@@ -485,8 +488,8 @@ static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
/* mov.<bwl> rd,[ri,rb] */
static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem);
return true;
@@ -498,7 +501,7 @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
/* mov.<bwl> rs,rd */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
- TCGv tmp, mem, addr;
+ TCGv_i32 tmp, mem, addr;
if (a->lds == 3 && a->ldd == 3) {
/* mov.<bwl> rs,rd */
@@ -506,7 +509,7 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
return true;
}
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
if (a->lds == 3) {
/* mov.<bwl> rs,dsp[rd] */
addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
@@ -517,7 +520,7 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], addr);
} else {
/* mov.<bwl> dsp[rs],dsp[rd] */
- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i32();
addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
rx_gen_ld(ctx, a->sz, tmp, addr);
addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
@@ -530,8 +533,8 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
/* mov.<bwl> rs,[-rd] */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
tcg_gen_mov_i32(val, cpu_regs[a->rs]);
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
@@ -547,8 +550,8 @@ static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
/* mov.<bwl> [-rd],rs */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -564,8 +567,8 @@ static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
/* movu.<bw> dsp[rs],rd */
static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
@@ -581,8 +584,8 @@ static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
/* movu.<bw> [ri,rb],rd */
static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
{
- TCGv mem;
- mem = tcg_temp_new();
+ TCGv_i32 mem;
+ mem = tcg_temp_new_i32();
rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem);
return true;
@@ -592,8 +595,8 @@ static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
/* mov.<bw> [-rd],rs */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
if (a->ad == 1) {
tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
}
@@ -622,8 +625,8 @@ static bool trans_POP(DisasContext *ctx, arg_POP *a)
/* popc cr */
static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
pop(ctx, val);
move_to_cr(ctx, val, a->cr);
return true;
@@ -648,8 +651,8 @@ static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
/* push.<bwl> rs */
static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
tcg_gen_mov_i32(val, cpu_regs[a->rs]);
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
rx_gen_st(ctx, a->sz, val, cpu_sp);
@@ -659,9 +662,9 @@ static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
/* push.<bwl> dsp[rs] */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
- TCGv mem, val, addr;
- mem = tcg_temp_new();
- val = tcg_temp_new();
+ TCGv_i32 mem, val, addr;
+ mem = tcg_temp_new_i32();
+ val = tcg_temp_new_i32();
addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
rx_gen_ld(ctx, a->sz, val, addr);
tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
@@ -672,8 +675,8 @@ static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
/* pushc rx */
static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
move_from_cr(ctx, val, a->cr, ctx->pc);
push(ctx, val);
return true;
@@ -698,8 +701,8 @@ static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
/* xchg rs,rd */
static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
@@ -709,8 +712,8 @@ static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
/* xchg dsp[rs].<mi>,rd */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
- TCGv mem, addr;
- mem = tcg_temp_new();
+ TCGv_i32 mem, addr;
+ mem = tcg_temp_new_i32();
switch (a->mi) {
case 0: /* dsp[rs].b */
case 1: /* dsp[rs].w */
@@ -731,8 +734,8 @@ static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
static inline void stcond(TCGCond cond, int rd, int imm)
{
- TCGv z;
- TCGv _imm;
+ TCGv_i32 z;
+ TCGv_i32 _imm;
z = tcg_constant_i32(0);
_imm = tcg_constant_i32(imm);
tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
@@ -758,12 +761,12 @@ static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
{
DisasCompare dc;
- TCGv val, mem, addr;
- dc.temp = tcg_temp_new();
+ TCGv_i32 val, mem, addr;
+ dc.temp = tcg_temp_new_i32();
psw_cond(&dc, a->cd);
if (a->ld < 3) {
- val = tcg_temp_new();
- mem = tcg_temp_new();
+ val = tcg_temp_new_i32();
+ mem = tcg_temp_new_i32();
tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
rx_gen_st(ctx, a->sz, val, addr);
@@ -804,8 +807,8 @@ static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
return true;
}
-typedef void (*op2fn)(TCGv ret, TCGv arg1);
-typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
+typedef void (*op2fn)(TCGv_i32 ret, TCGv_i32 arg1);
+typedef void (*op3fn)(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
{
@@ -819,20 +822,20 @@ static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
- TCGv imm = tcg_constant_i32(src2);
+ TCGv_i32 imm = tcg_constant_i32(src2);
opr(cpu_regs[dst], cpu_regs[src], imm);
}
static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
int dst, int src, int ld, int mi)
{
- TCGv val, mem;
- mem = tcg_temp_new();
+ TCGv_i32 val, mem;
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, ld, mi, src);
opr(cpu_regs[dst], cpu_regs[dst], val);
}
-static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_and(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -862,7 +865,7 @@ static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
return true;
}
-static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_or(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -892,7 +895,7 @@ static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
return true;
}
-static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_xor(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -914,7 +917,7 @@ static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
return true;
}
-static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_tst(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
@@ -935,7 +938,7 @@ static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
return true;
}
-static void rx_not(TCGv ret, TCGv arg1)
+static void rx_not(TCGv_i32 ret, TCGv_i32 arg1)
{
tcg_gen_not_i32(ret, arg1);
tcg_gen_mov_i32(cpu_psw_z, ret);
@@ -950,7 +953,7 @@ static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
return true;
}
-static void rx_neg(TCGv ret, TCGv arg1)
+static void rx_neg(TCGv_i32 ret, TCGv_i32 arg1)
{
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
tcg_gen_neg_i32(ret, arg1);
@@ -969,9 +972,9 @@ static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
}
/* ret = arg1 + arg2 + psw_c */
-static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_adc(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- TCGv z = tcg_constant_i32(0);
+ TCGv_i32 z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
@@ -1007,9 +1010,9 @@ static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
}
/* ret = arg1 + arg2 */
-static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_add(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- TCGv z = tcg_constant_i32(0);
+ TCGv_i32 z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
@@ -1042,7 +1045,7 @@ static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
}
/* ret = arg1 - arg2 */
-static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_sub(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
@@ -1056,17 +1059,17 @@ static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
}
}
-static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
+static void rx_cmp(TCGv_i32 dummy, TCGv_i32 arg1, TCGv_i32 arg2)
{
rx_sub(NULL, arg1, arg2);
}
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
-static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_sbb(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- TCGv temp;
- temp = tcg_temp_new();
+ TCGv_i32 temp;
+ temp = tcg_temp_new_i32();
tcg_gen_not_i32(temp, arg2);
rx_adc(ret, arg1, temp);
}
@@ -1192,7 +1195,7 @@ static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
/* emul #imm, rd */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
- TCGv imm = tcg_constant_i32(a->imm);
+ TCGv_i32 imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1205,11 +1208,11 @@ static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
/* emul dsp[rs], rd */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
- TCGv val, mem;
+ TCGv_i32 val, mem;
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
cpu_regs[a->rd], val);
@@ -1219,7 +1222,7 @@ static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
- TCGv imm = tcg_constant_i32(a->imm);
+ TCGv_i32 imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1232,23 +1235,23 @@ static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
/* emulu dsp[rs], rd */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
- TCGv val, mem;
+ TCGv_i32 val, mem;
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
- mem = tcg_temp_new();
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
cpu_regs[a->rd], val);
return true;
}
-static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_div(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
gen_helper_div(ret, tcg_env, arg1, arg2);
}
-static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
+static void rx_divu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
gen_helper_divu(ret, tcg_env, arg1, arg2);
}
@@ -1288,8 +1291,8 @@ static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
/* shll #imm:5, rs2, rd */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
if (a->imm) {
tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
@@ -1311,14 +1314,14 @@ static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
TCGLabel *noshift, *done;
- TCGv count, tmp;
+ TCGv_i32 count, tmp;
noshift = gen_new_label();
done = gen_new_label();
/* if (cpu_regs[a->rs]) { */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
- count = tcg_temp_new();
- tmp = tcg_temp_new();
+ count = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
@@ -1342,7 +1345,7 @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
unsigned int alith)
{
- static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
+ static void (* const gen_sXri[])(TCGv_i32 ret, TCGv_i32 arg1, int arg2) = {
tcg_gen_shri_i32, tcg_gen_sari_i32,
};
tcg_debug_assert(alith < 2);
@@ -1362,17 +1365,18 @@ static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
TCGLabel *noshift, *done;
- TCGv count;
- static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
+ TCGv_i32 count;
+ static void (* const gen_sXri[])(TCGv_i32 ret, TCGv_i32 arg1, int arg2) = {
tcg_gen_shri_i32, tcg_gen_sari_i32,
};
- static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
+ static void (* const gen_sXr[])(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2) = {
tcg_gen_shr_i32, tcg_gen_sar_i32,
};
tcg_debug_assert(alith < 2);
noshift = gen_new_label();
done = gen_new_label();
- count = tcg_temp_new();
+ count = tcg_temp_new_i32();
/* if (cpu_regs[rs]) { */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
tcg_gen_andi_i32(count, cpu_regs[rs], 31);
@@ -1424,8 +1428,8 @@ static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
/* rolc rd */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
@@ -1438,8 +1442,8 @@ static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
/* rorc rd */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
@@ -1514,8 +1518,8 @@ static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
/* revw rs, rd */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
- TCGv tmp;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
tcg_gen_shli_i32(tmp, tmp, 8);
tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
@@ -1532,7 +1536,7 @@ static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
switch (cd) {
case 0 ... 13:
- dc.temp = tcg_temp_new();
+ dc.temp = tcg_temp_new_i32();
psw_cond(&dc, cd);
t = gen_new_label();
done = gen_new_label();
@@ -1589,7 +1593,7 @@ static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
static inline void rx_save_pc(DisasContext *ctx)
{
- TCGv pc = tcg_constant_i32(ctx->base.pc_next);
+ TCGv_i32 pc = tcg_constant_i32(ctx->base.pc_next);
push(ctx, pc);
}
@@ -1672,7 +1676,7 @@ static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
#define STRING(op) \
do { \
- TCGv size = tcg_constant_i32(a->sz); \
+ TCGv_i32 size = tcg_constant_i32(a->sz); \
gen_helper_##op(tcg_env, size); \
} while (0)
@@ -1803,7 +1807,7 @@ static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
/* racw #imm */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
- TCGv imm = tcg_constant_i32(a->imm + 1);
+ TCGv_i32 imm = tcg_constant_i32(a->imm + 1);
gen_helper_racw(tcg_env, imm);
return true;
}
@@ -1811,8 +1815,8 @@ static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
/* sat rd */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
- TCGv tmp, z;
- tmp = tcg_temp_new();
+ TCGv_i32 tmp, z;
+ tmp = tcg_temp_new_i32();
z = tcg_constant_i32(0);
/* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
@@ -1835,7 +1839,7 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
cat3(arg_, name, _ir) * a) \
{ \
- TCGv imm = tcg_constant_i32(li(ctx, 0)); \
+ TCGv_i32 imm = tcg_constant_i32(li(ctx, 0)); \
gen_helper_##op(cpu_regs[a->rd], tcg_env, \
cpu_regs[a->rd], imm); \
return true; \
@@ -1843,8 +1847,8 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
cat3(arg_, name, _mr) * a) \
{ \
- TCGv val, mem; \
- mem = tcg_temp_new(); \
+ TCGv_i32 val, mem; \
+ mem = tcg_temp_new_i32(); \
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
gen_helper_##op(cpu_regs[a->rd], tcg_env, \
cpu_regs[a->rd], val); \
@@ -1854,8 +1858,8 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
#define FCONVOP(name, op) \
static bool trans_##name(DisasContext *ctx, arg_##name * a) \
{ \
- TCGv val, mem; \
- mem = tcg_temp_new(); \
+ TCGv_i32 val, mem; \
+ mem = tcg_temp_new_i32(); \
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
gen_helper_##op(cpu_regs[a->rd], tcg_env, val); \
return true; \
@@ -1869,7 +1873,7 @@ FOP(FDIV, fdiv)
/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
- TCGv imm = tcg_constant_i32(li(ctx, 0));
+ TCGv_i32 imm = tcg_constant_i32(li(ctx, 0));
gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
return true;
}
@@ -1878,8 +1882,8 @@ static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
- TCGv val, mem;
- mem = tcg_temp_new();
+ TCGv_i32 val, mem;
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
return true;
@@ -1892,70 +1896,70 @@ FCONVOP(ROUND, round)
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
- TCGv val, mem;
- mem = tcg_temp_new();
+ TCGv_i32 val, mem;
+ mem = tcg_temp_new_i32();
val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
return true;
}
-static void rx_bsetm(DisasContext *ctx, TCGv mem, TCGv mask)
+static void rx_bsetm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_or_i32(val, val, mask);
rx_gen_st(ctx, MO_8, val, mem);
}
-static void rx_bclrm(DisasContext *ctx, TCGv mem, TCGv mask)
+static void rx_bclrm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_andc_i32(val, val, mask);
rx_gen_st(ctx, MO_8, val, mem);
}
-static void rx_btstm(DisasContext *ctx, TCGv mem, TCGv mask)
+static void rx_btstm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_and_i32(val, val, mask);
tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
-static void rx_bnotm(DisasContext *ctx, TCGv mem, TCGv mask)
+static void rx_bnotm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask)
{
- TCGv val;
- val = tcg_temp_new();
+ TCGv_i32 val;
+ val = tcg_temp_new_i32();
rx_gen_ld(ctx, MO_8, val, mem);
tcg_gen_xor_i32(val, val, mask);
rx_gen_st(ctx, MO_8, val, mem);
}
-static void rx_bsetr(DisasContext *ctx, TCGv reg, TCGv mask)
+static void rx_bsetr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
tcg_gen_or_i32(reg, reg, mask);
}
-static void rx_bclrr(DisasContext *ctx, TCGv reg, TCGv mask)
+static void rx_bclrr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
tcg_gen_andc_i32(reg, reg, mask);
}
-static inline void rx_btstr(DisasContext *ctx, TCGv reg, TCGv mask)
+static inline void rx_btstr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
- TCGv t0;
- t0 = tcg_temp_new();
+ TCGv_i32 t0;
+ t0 = tcg_temp_new_i32();
tcg_gen_and_i32(t0, reg, mask);
tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
-static inline void rx_bnotr(DisasContext *ctx, TCGv reg, TCGv mask)
+static inline void rx_bnotr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
tcg_gen_xor_i32(reg, reg, mask);
}
@@ -1964,8 +1968,8 @@ static inline void rx_bnotr(DisasContext *ctx, TCGv reg, TCGv mask)
static bool cat3(trans_, name, _im)(DisasContext *ctx, \
cat3(arg_, name, _im) * a) \
{ \
- TCGv mask, mem, addr; \
- mem = tcg_temp_new(); \
+ TCGv_i32 mask, mem, addr; \
+ mem = tcg_temp_new_i32(); \
mask = tcg_constant_i32(1 << a->imm); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
cat3(rx_, op, m)(ctx, addr, mask); \
@@ -1974,7 +1978,7 @@ static inline void rx_bnotr(DisasContext *ctx, TCGv reg, TCGv mask)
static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
cat3(arg_, name, _ir) * a) \
{ \
- TCGv mask; \
+ TCGv_i32 mask; \
mask = tcg_constant_i32(1 << a->imm); \
cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \
return true; \
@@ -1982,9 +1986,9 @@ static inline void rx_bnotr(DisasContext *ctx, TCGv reg, TCGv mask)
static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
cat3(arg_, name, _rr) * a) \
{ \
- TCGv mask, b; \
- mask = tcg_temp_new(); \
- b = tcg_temp_new(); \
+ TCGv_i32 mask, b; \
+ mask = tcg_temp_new_i32(); \
+ b = tcg_temp_new_i32(); \
tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \
@@ -1993,12 +1997,12 @@ static inline void rx_bnotr(DisasContext *ctx, TCGv reg, TCGv mask)
static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
cat3(arg_, name, _rm) * a) \
{ \
- TCGv mask, mem, addr, b; \
- mask = tcg_temp_new(); \
- b = tcg_temp_new(); \
+ TCGv_i32 mask, mem, addr, b; \
+ mask = tcg_temp_new_i32(); \
+ b = tcg_temp_new_i32(); \
tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
- mem = tcg_temp_new(); \
+ mem = tcg_temp_new_i32(); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
cat3(rx_, op, m)(ctx, addr, mask); \
return true; \
@@ -2009,12 +2013,12 @@ BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
-static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
+static inline void bmcnd_op(TCGv_i32 val, TCGCond cond, int pos)
{
- TCGv bit;
+ TCGv_i32 bit;
DisasCompare dc;
- dc.temp = tcg_temp_new();
- bit = tcg_temp_new();
+ dc.temp = tcg_temp_new_i32();
+ bit = tcg_temp_new_i32();
psw_cond(&dc, cond);
tcg_gen_andi_i32(val, val, ~(1 << pos));
tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
@@ -2024,9 +2028,9 @@ static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
- TCGv val, mem, addr;
- val = tcg_temp_new();
- mem = tcg_temp_new();
+ TCGv_i32 val, mem, addr;
+ val = tcg_temp_new_i32();
+ mem = tcg_temp_new_i32();
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
rx_gen_ld(ctx, MO_8, val, addr);
bmcnd_op(val, a->cd, a->imm);
@@ -2118,7 +2122,7 @@ static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
/* mvtc #imm, rd */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
- TCGv imm;
+ TCGv_i32 imm;
imm = tcg_constant_i32(a->imm);
move_to_cr(ctx, imm, a->cr);
@@ -2142,9 +2146,9 @@ static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
/* rtfi */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
- TCGv psw;
+ TCGv_i32 psw;
if (is_privileged(ctx, 1)) {
- psw = tcg_temp_new();
+ psw = tcg_temp_new_i32();
tcg_gen_mov_i32(cpu_pc, cpu_bpc);
tcg_gen_mov_i32(psw, cpu_bpsw);
gen_helper_set_psw_rte(tcg_env, psw);
@@ -2156,9 +2160,9 @@ static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
/* rte */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
- TCGv psw;
+ TCGv_i32 psw;
if (is_privileged(ctx, 1)) {
- psw = tcg_temp_new();
+ psw = tcg_temp_new_i32();
pop(ctx, cpu_pc);
pop(ctx, psw);
gen_helper_set_psw_rte(tcg_env, psw);
@@ -2179,7 +2183,7 @@ static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
/* int #imm */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
- TCGv vec;
+ TCGv_i32 vec;
tcg_debug_assert(a->imm < 0x100);
vec = tcg_constant_i32(a->imm);
--
2.51.0
* [PULL 50/75] target/rx: Un-inline various helpers
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (48 preceding siblings ...)
2025-10-16 12:15 ` [PULL 49/75] target/rx: Expand TCG register definitions for 32-bit target Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 51/75] target/s390x: Replace HOST_BIG_ENDIAN #ifdef with if() check Philippe Mathieu-Daudé
` (24 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Rely on the linker to optimize at link time.
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251009200012.33650-1-philmd@linaro.org>
---
target/rx/translate.c | 51 ++++++++++++++++++++-----------------------
1 file changed, 24 insertions(+), 27 deletions(-)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 55285d8166f..ef865f14bf5 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -166,37 +166,34 @@ static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest)
}
/* generic load wrapper */
-static inline void rx_gen_ld(DisasContext *ctx, MemOp size,
- TCGv_i32 reg, TCGv_i32 mem)
+static void rx_gen_ld(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | mo_endian(ctx));
}
/* unsigned load wrapper */
-static inline void rx_gen_ldu(DisasContext *ctx, MemOp size,
- TCGv_i32 reg, TCGv_i32 mem)
+static void rx_gen_ldu(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem)
{
tcg_gen_qemu_ld_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* generic store wrapper */
-static inline void rx_gen_st(DisasContext *ctx, MemOp size,
- TCGv_i32 reg, TCGv_i32 mem)
+static void rx_gen_st(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem)
{
tcg_gen_qemu_st_i32(reg, mem, 0, size | mo_endian(ctx));
}
/* [ri, rb] */
-static inline void rx_gen_regindex(DisasContext *ctx, TCGv_i32 mem,
- int size, int ri, int rb)
+static void rx_gen_regindex(DisasContext *ctx, TCGv_i32 mem,
+ int size, int ri, int rb)
{
tcg_gen_shli_i32(mem, cpu_regs[ri], size);
tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}
/* dsp[reg] */
-static inline TCGv_i32 rx_index_addr(DisasContext *ctx, TCGv_i32 mem,
- int ld, int size, int reg)
+static TCGv_i32 rx_index_addr(DisasContext *ctx, TCGv_i32 mem,
+ int ld, int size, int reg)
{
uint32_t dsp;
@@ -226,8 +223,8 @@ static inline MemOp mi_to_mop(unsigned mi)
}
/* load source operand */
-static inline TCGv_i32 rx_load_source(DisasContext *ctx, TCGv_i32 mem,
- int ld, int mi, int rs)
+static TCGv_i32 rx_load_source(DisasContext *ctx, TCGv_i32 mem,
+ int ld, int mi, int rs)
{
TCGv_i32 addr;
MemOp mop;
@@ -732,7 +729,7 @@ static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
return true;
}
-static inline void stcond(TCGCond cond, int rd, int imm)
+static void stcond(TCGCond cond, int rd, int imm)
{
TCGv_i32 z;
TCGv_i32 _imm;
@@ -810,24 +807,24 @@ static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
typedef void (*op2fn)(TCGv_i32 ret, TCGv_i32 arg1);
typedef void (*op3fn)(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
+static void rx_gen_op_rr(op2fn opr, int dst, int src)
{
opr(cpu_regs[dst], cpu_regs[src]);
}
-static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
+static void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
{
opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}
-static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
+static void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
TCGv_i32 imm = tcg_constant_i32(src2);
opr(cpu_regs[dst], cpu_regs[src], imm);
}
-static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
- int dst, int src, int ld, int mi)
+static void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
+ int dst, int src, int ld, int mi)
{
TCGv_i32 val, mem;
mem = tcg_temp_new_i32();
@@ -1342,8 +1339,8 @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
return true;
}
-static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
- unsigned int alith)
+static void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
+ unsigned int alith)
{
static void (* const gen_sXri[])(TCGv_i32 ret, TCGv_i32 arg1, int arg2) = {
tcg_gen_shri_i32, tcg_gen_sari_i32,
@@ -1362,7 +1359,7 @@ static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
-static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
+static void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
TCGLabel *noshift, *done;
TCGv_i32 count;
@@ -1456,7 +1453,7 @@ static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
-static inline void rx_rot(int ir, int dir, int rd, int src)
+static void rx_rot(int ir, int dir, int rd, int src)
{
switch (dir) {
case ROTL:
@@ -1591,7 +1588,7 @@ static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
return true;
}
-static inline void rx_save_pc(DisasContext *ctx)
+static void rx_save_pc(DisasContext *ctx)
{
TCGv_i32 pc = tcg_constant_i32(ctx->base.pc_next);
push(ctx, pc);
@@ -1950,7 +1947,7 @@ static void rx_bclrr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
tcg_gen_andc_i32(reg, reg, mask);
}
-static inline void rx_btstr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
+static void rx_btstr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
TCGv_i32 t0;
t0 = tcg_temp_new_i32();
@@ -1959,7 +1956,7 @@ static inline void rx_btstr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
-static inline void rx_bnotr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
+static void rx_bnotr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask)
{
tcg_gen_xor_i32(reg, reg, mask);
}
@@ -2013,7 +2010,7 @@ BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
-static inline void bmcnd_op(TCGv_i32 val, TCGCond cond, int pos)
+static void bmcnd_op(TCGv_i32 val, TCGCond cond, int pos)
{
TCGv_i32 bit;
DisasCompare dc;
@@ -2054,7 +2051,7 @@ enum {
PSW_U = 9,
};
-static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
+static void clrsetpsw(DisasContext *ctx, int cb, int val)
{
if (cb < 8) {
switch (cb) {
--
2.51.0
* [PULL 51/75] target/s390x: Replace HOST_BIG_ENDIAN #ifdef with if() check
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (49 preceding siblings ...)
2025-10-16 12:15 ` [PULL 50/75] target/rx: Un-inline various helpers Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 52/75] target/sh4: Convert CPUSH4State::sr register to uint32_t type Philippe Mathieu-Daudé
` (23 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Replace preprocessor-time #ifdef with a compile-time check
to ensure all code paths are built and tested. This reduces
build-time configuration complexity and improves code
maintainability.
No functional change intended.
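The trick is that HOST_BIG_ENDIAN is always defined, to 0 or 1, so it can
sit in an ordinary if(): both branches are parsed and type-checked, and the
dead one is folded away as a constant. Roughly (a sketch from memory;
exact header assumed to be include/qemu/compiler.h):
  /* Sketch: a plain 0/1 constant, usable in C expressions. */
  #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  #  define HOST_BIG_ENDIAN 1
  #else
  #  define HOST_BIG_ENDIAN 0
  #endif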
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Message-Id: <20251010134226.72221-15-philmd@linaro.org>
---
target/s390x/tcg/translate.c | 6 +++---
target/s390x/tcg/translate_vx.c.inc | 6 +++---
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index c7e8574438c..ec9e5a07516 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -258,9 +258,9 @@ static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
* 16 byte operations to handle it in a special way.
*/
g_assert(es <= MO_64);
-#if !HOST_BIG_ENDIAN
- offs ^= (8 - bytes);
-#endif
+ if (!HOST_BIG_ENDIAN) {
+ offs ^= (8 - bytes);
+ }
return offs + vec_full_reg_offset(reg);
}
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index e073e5ad3aa..f3b4b48ab7b 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -175,9 +175,9 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
/* convert it to an element offset relative to tcg_env (vec_reg_offset() */
tcg_gen_shli_i64(tmp, tmp, es);
-#if !HOST_BIG_ENDIAN
- tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
-#endif
+ if (!HOST_BIG_ENDIAN) {
+ tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
+ }
tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));
/* generate the final ptr by adding tcg_env */
--
2.51.0
* [PULL 52/75] target/sh4: Convert CPUSH4State::sr register to uint32_t type
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (50 preceding siblings ...)
2025-10-16 12:15 ` [PULL 51/75] target/s390x: Replace HOST_BIG_ENDIAN #ifdef with if() check Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 53/75] target/sh4: Remove target_ulong use in cpu_sh4_is_cached() Philippe Mathieu-Daudé
` (22 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Since its introduction in commit fdf9b3e831e, the %SR register
has been of uint32_t type.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008064814.90520-2-philmd@linaro.org>
---
target/sh4/cpu.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index c41ab70dd7c..db27a693f12 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -365,14 +365,14 @@ static inline int cpu_ptel_pr (uint32_t ptel)
#define PTEA_TC (1 << 3)
#define cpu_ptea_tc(ptea) (((ptea) & PTEA_TC) >> 3)
-static inline target_ulong cpu_read_sr(CPUSH4State *env)
+static inline uint32_t cpu_read_sr(CPUSH4State *env)
{
return env->sr | (env->sr_m << SR_M) |
(env->sr_q << SR_Q) |
(env->sr_t << SR_T);
}
-static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
+static inline void cpu_write_sr(CPUSH4State *env, uint32_t sr)
{
env->sr_m = (sr >> SR_M) & 1;
env->sr_q = (sr >> SR_Q) & 1;
--
2.51.0
* [PULL 53/75] target/sh4: Remove target_ulong use in cpu_sh4_is_cached()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (51 preceding siblings ...)
2025-10-16 12:15 ` [PULL 52/75] target/sh4: Convert CPUSH4State::sr register to uint32_t type Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 54/75] target/sh4: Use hwaddr type for hardware addresses Philippe Mathieu-Daudé
` (21 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Since commit 852d481faf7 ("SH: Improve movca.l/ocbi emulation")
helper_movcal() passes a uint32_t value to cpu_sh4_is_cached().
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008064814.90520-3-philmd@linaro.org>
---
target/sh4/cpu.h | 2 +-
target/sh4/helper.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index db27a693f12..b0759010c47 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -277,7 +277,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
uint32_t mem_value);
#endif
-int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr);
+int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr);
void cpu_load_tlb(CPUSH4State * env);
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 1744ef0e6d8..4f1e2072296 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -47,7 +47,7 @@
#if defined(CONFIG_USER_ONLY)
-int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
+int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr)
{
/* For user mode, only U0 area is cacheable. */
return !(addr & 0x80000000);
@@ -735,7 +735,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
}
}
-int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
+int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr)
{
int n;
int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
--
2.51.0
* [PULL 54/75] target/sh4: Use hwaddr type for hardware addresses
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (52 preceding siblings ...)
2025-10-16 12:15 ` [PULL 53/75] target/sh4: Remove target_ulong use in cpu_sh4_is_cached() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 55/75] target/sh4: Remove target_ulong uses in superh_cpu_get_phys_page_debug Philippe Mathieu-Daudé
` (20 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The CPUClass::get_phys_page_debug() handler returns a 'hwaddr' type.
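For context, a sketch of the address typedefs involved (header locations
assumed from memory): both are host-independent 64-bit types, unlike the
32-bit target_ulong used by sh4.
  typedef uint64_t hwaddr;  /* guest physical address (include/exec/hwaddr.h) */
  typedef uint64_t vaddr;   /* guest virtual address  (include/exec/vaddr.h)  */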
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008064814.90520-4-philmd@linaro.org>
---
target/sh4/helper.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 4f1e2072296..55ab1dc9947 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -325,7 +325,7 @@ static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid
MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
-static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
+static int get_mmu_address(CPUSH4State *env, hwaddr *physical,
int *prot, target_ulong address,
MMUAccessType access_type)
{
@@ -392,7 +392,7 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
return n;
}
-static int get_physical_address(CPUSH4State * env, target_ulong * physical,
+static int get_physical_address(CPUSH4State *env, hwaddr* physical,
int *prot, target_ulong address,
MMUAccessType access_type)
{
@@ -433,7 +433,7 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
- target_ulong physical;
+ hwaddr physical;
int prot;
if (get_physical_address(cpu_env(cs), &physical, &prot, addr, MMU_DATA_LOAD)
@@ -800,7 +800,7 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
CPUSH4State *env = cpu_env(cs);
int ret;
- target_ulong physical;
+ hwaddr physical;
int prot;
ret = get_physical_address(env, &physical, &prot, address, access_type);
--
2.51.0
* [PULL 55/75] target/sh4: Remove target_ulong uses in superh_cpu_get_phys_page_debug
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (53 preceding siblings ...)
2025-10-16 12:15 ` [PULL 54/75] target/sh4: Use hwaddr type for hardware addresses Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 56/75] target/sh4: Use vaddr type for TLB virtual addresses Philippe Mathieu-Daudé
` (19 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The CPUClass::get_phys_page_debug() handler takes a 'vaddr' address
type since commit 00b941e581b ("cpu: Turn cpu_get_phys_page_debug()
into a CPUClass hook").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008064814.90520-5-philmd@linaro.org>
---
target/sh4/helper.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 55ab1dc9947..f5c37c2d80d 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -231,11 +231,11 @@ static int itlb_replacement(CPUSH4State * env)
/* Find the corresponding entry in the right TLB
Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
*/
-static int find_tlb_entry(CPUSH4State * env, target_ulong address,
+static int find_tlb_entry(CPUSH4State *env, vaddr address,
tlb_t * entries, uint8_t nbtlb, int use_asid)
{
int match = MMU_DTLB_MISS;
- uint32_t start, end;
+ vaddr start, end;
uint8_t asid;
int i;
@@ -291,7 +291,7 @@ static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb)
/* Find itlb entry
Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
*/
-static int find_itlb_entry(CPUSH4State * env, target_ulong address,
+static int find_itlb_entry(CPUSH4State *env, vaddr address,
int use_asid)
{
int e;
@@ -309,7 +309,7 @@ static int find_itlb_entry(CPUSH4State * env, target_ulong address,
/* Find utlb entry
Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
-static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid)
+static int find_utlb_entry(CPUSH4State *env, vaddr address, int use_asid)
{
/* per utlb access */
increment_urc(env);
@@ -326,7 +326,7 @@ static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid
MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
static int get_mmu_address(CPUSH4State *env, hwaddr *physical,
- int *prot, target_ulong address,
+ int *prot, vaddr address,
MMUAccessType access_type)
{
int use_asid, n;
@@ -393,7 +393,7 @@ static int get_mmu_address(CPUSH4State *env, hwaddr *physical,
}
static int get_physical_address(CPUSH4State *env, hwaddr* physical,
- int *prot, target_ulong address,
+ int *prot, vaddr address,
MMUAccessType access_type)
{
/* P1, P2 and P4 areas do not use translation */
--
2.51.0
* [PULL 56/75] target/sh4: Use vaddr type for TLB virtual addresses
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (54 preceding siblings ...)
2025-10-16 12:15 ` [PULL 55/75] target/sh4: Remove target_ulong uses in superh_cpu_get_phys_page_debug Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 57/75] target/sh4: Remove target_ulong use in gen_goto_tb() Philippe Mathieu-Daudé
` (18 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
tlb_flush_page() expects a vaddr type since commit 732d548732e
("accel: Replace target_ulong in tlb_*()").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008064814.90520-6-philmd@linaro.org>
---
target/sh4/helper.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index f5c37c2d80d..3b18a320b86 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -452,7 +452,7 @@ void cpu_load_tlb(CPUSH4State * env)
if (entry->v) {
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(cs, address);
}
@@ -528,7 +528,7 @@ void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
tlb_t * entry = &s->itlb[index];
if (entry->v) {
/* Overwriting valid entry in itlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(env_cpu(s), address);
}
entry->asid = asid;
@@ -570,7 +570,7 @@ void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
/* ITLB Data Array 1 */
if (entry->v) {
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(env_cpu(s), address);
}
entry->ppn = (mem_value & 0x1ffffc00) >> 10;
@@ -665,7 +665,7 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
CPUState *cs = env_cpu(s);
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(cs, address);
}
entry->asid = asid;
@@ -716,7 +716,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
/* UTLB Data Array 1 */
if (entry->v) {
/* Overwriting valid entry in utlb. */
- target_ulong address = entry->vpn << 10;
+ vaddr address = entry->vpn << 10;
tlb_flush_page(env_cpu(s), address);
}
entry->ppn = (mem_value & 0x1ffffc00) >> 10;
--
2.51.0
* [PULL 57/75] target/sh4: Remove target_ulong use in gen_goto_tb()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (55 preceding siblings ...)
2025-10-16 12:15 ` [PULL 56/75] target/sh4: Use vaddr type for TLB virtual addresses Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 58/75] target/sparc: Reduce inclusions of 'exec/cpu-common.h' Philippe Mathieu-Daudé
` (17 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
translator_use_goto_tb() expects a vaddr type since commit
b1c09220b4c ("accel/tcg: Replace target_ulong with vaddr in
translator_*()").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008064814.90520-7-philmd@linaro.org>
---
target/sh4/translate.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 137b75a4fb2..b3ae0a3814c 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -223,7 +223,7 @@ static inline bool use_exit_tb(DisasContext *ctx)
return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}
-static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+static bool use_goto_tb(DisasContext *ctx, vaddr dest)
{
if (use_exit_tb(ctx)) {
return false;
@@ -231,8 +231,7 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
return translator_use_goto_tb(&ctx->base, dest);
}
-static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx,
- target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, vaddr dest)
{
if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(tb_slot_idx);
@@ -268,7 +267,7 @@ static void gen_jump(DisasContext * ctx)
}
/* Immediate conditional jump (bt or bf) */
-static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
+static void gen_conditional_jump(DisasContext *ctx, vaddr dest,
bool jump_if_true)
{
TCGLabel *l1 = gen_new_label();
--
2.51.0
* [PULL 58/75] target/sparc: Reduce inclusions of 'exec/cpu-common.h'
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (56 preceding siblings ...)
2025-10-16 12:15 ` [PULL 57/75] target/sh4: Remove target_ulong use in gen_goto_tb() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 59/75] target/tricore: Remove target_ulong use in gen_goto_tb() Philippe Mathieu-Daudé
` (16 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Only 2 files require declarations from "exec/cpu-common.h".
Include it there once, instead of polluting all files
including "cpu.h".
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20251002145742.75624-7-philmd@linaro.org>
Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
---
target/sparc/cpu.h | 1 -
target/sparc/helper.c | 1 +
target/sparc/int64_helper.c | 1 +
3 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 31cb3d97eb1..7169a502432 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -3,7 +3,6 @@
#include "qemu/bswap.h"
#include "cpu-qom.h"
-#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "exec/cpu-interrupt.h"
#include "qemu/cpu-float.h"
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
index 9163b9d46ad..c5d88de37c9 100644
--- a/target/sparc/helper.c
+++ b/target/sparc/helper.c
@@ -21,6 +21,7 @@
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"
+#include "exec/cpu-common.h"
#include "exec/helper-proto.h"
void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra)
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
index 23adda4cad7..96ef81c26cd 100644
--- a/target/sparc/int64_helper.c
+++ b/target/sparc/int64_helper.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
+#include "exec/cpu-common.h"
#include "exec/helper-proto.h"
#include "exec/log.h"
#include "trace.h"
--
2.51.0
* [PULL 59/75] target/tricore: Remove target_ulong use in gen_goto_tb()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (57 preceding siblings ...)
2025-10-16 12:15 ` [PULL 58/75] target/sparc: Reduce inclusions of 'exec/cpu-common.h' Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 60/75] target/tricore: Replace target_ulong -> vaddr with tlb_fill() callees Philippe Mathieu-Daudé
` (15 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
translator_use_goto_tb() expects a vaddr type since commit
b1c09220b4c ("accel/tcg: Replace target_ulong with vaddr in
translator_*()").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-2-philmd@linaro.org>
---
target/tricore/translate.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 7c6e3095971..dd09f0651f5 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -72,7 +72,8 @@ static const char *regnames_d[] = {
typedef struct DisasContext {
DisasContextBase base;
- target_ulong pc_succ_insn;
+
+ vaddr pc_succ_insn;
uint32_t opcode;
/* Routine used to access memory */
int mem_idx;
@@ -2811,13 +2812,12 @@ static void gen_calc_usb_mulr_h(TCGv arg)
/* helpers for generating program flow micro-ops */
-static inline void gen_save_pc(target_ulong pc)
+static inline void gen_save_pc(vaddr pc)
{
tcg_gen_movi_tl(cpu_PC, pc);
}
-static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_index,
- target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_index, vaddr dest)
{
if (translator_use_goto_tb(&ctx->base, dest)) {
tcg_gen_goto_tb(tb_slot_index);
--
2.51.0
* [PULL 60/75] target/tricore: Replace target_ulong -> vaddr with tlb_fill() callees
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (58 preceding siblings ...)
2025-10-16 12:15 ` [PULL 59/75] target/tricore: Remove target_ulong use in gen_goto_tb() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 61/75] target/tricore: Remove target_ulong use in translate_insn() handler Philippe Mathieu-Daudé
` (14 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
tlb_fill() provides a vaddr type since commit 68d6eee73c
("target/tricore: Convert to CPUClass::tlb_fill").
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-3-philmd@linaro.org>
---
target/tricore/helper.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
index e4c53d453dd..7574111c87d 100644
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -35,7 +35,7 @@ enum {
};
static int get_physical_address(CPUTriCoreState *env, hwaddr *physical,
- int *prot, target_ulong address,
+ int *prot, vaddr address,
MMUAccessType access_type, int mmu_idx)
{
int ret = TLBRET_MATCH;
@@ -61,7 +61,7 @@ hwaddr tricore_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
/* TODO: Add exception support */
-static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address,
+static void raise_mmu_exception(CPUTriCoreState *env, vaddr address,
int rw, int tlb_error)
{
}
--
2.51.0
* [PULL 61/75] target/tricore: Remove target_ulong use in translate_insn() handler
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (59 preceding siblings ...)
2025-10-16 12:15 ` [PULL 60/75] target/tricore: Replace target_ulong -> vaddr with tlb_fill() callees Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 62/75] target/tricore: Remove target_ulong use in gen_addi_d() Philippe Mathieu-Daudé
` (13 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Since commit 85c19af63e7 ("include/exec: Use vaddr in DisasContextBase
for virtual addresses") the DisasContextBase::pc_first field is a
vaddr type.
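The relevant part of the shared structure looks roughly like this
(simplified excerpt):

  typedef struct DisasContextBase {
      vaddr pc_first;   /* address of the first instruction in the TB */
      vaddr pc_next;    /* address of the next instruction to translate */
      /* ... */
  } DisasContextBase;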
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-4-philmd@linaro.org>
---
target/tricore/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index dd09f0651f5..4b769b204ab 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -8414,7 +8414,7 @@ static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
ctx->base.pc_next = ctx->pc_succ_insn;
if (ctx->base.is_jmp == DISAS_NEXT) {
- target_ulong page_start;
+ vaddr page_start;
page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
--
2.51.0
* [PULL 62/75] target/tricore: Remove target_ulong use in gen_addi_d()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (60 preceding siblings ...)
2025-10-16 12:15 ` [PULL 61/75] target/tricore: Remove target_ulong use in translate_insn() handler Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 63/75] target/tricore: Remove unnecessary cast to target_ulong Philippe Mathieu-Daudé
` (12 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Callers pass either int32_t or int16_t.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-5-philmd@linaro.org>
---
target/tricore/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 4b769b204ab..116f45135bb 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -1308,7 +1308,7 @@ gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
+static inline void gen_addi_d(TCGv ret, TCGv r1, int32_t r2)
{
TCGv temp = tcg_constant_i32(r2);
gen_add_d(ret, r1, temp);
--
2.51.0
* [PULL 63/75] target/tricore: Remove unnecessary cast to target_ulong
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (61 preceding siblings ...)
2025-10-16 12:15 ` [PULL 62/75] target/tricore: Remove target_ulong use in gen_addi_d() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 64/75] target/tricore: Replace target_ulong -> uint32_t in op_helper.c Philippe Mathieu-Daudé
` (11 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-6-philmd@linaro.org>
---
target/tricore/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 4f035b6f768..04319e107ba 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -37,7 +37,7 @@ static const gchar *tricore_gdb_arch_name(CPUState *cs)
static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
{
- cpu_env(cs)->PC = value & ~(target_ulong)1;
+ cpu_env(cs)->PC = value & ~1;
}
static vaddr tricore_cpu_get_pc(CPUState *cs)
--
2.51.0
* [PULL 64/75] target/tricore: Replace target_ulong -> uint32_t in op_helper.c
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (62 preceding siblings ...)
2025-10-16 12:15 ` [PULL 63/75] target/tricore: Remove unnecessary cast to target_ulong Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 65/75] target/tricore: Declare registers as TCGv_i32 Philippe Mathieu-Daudé
` (10 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The TriCore target is only built as 32-bit:
$ git grep TARGET_LONG_BITS configs/targets/tricore-*
configs/targets/tricore-softmmu.mak:2:TARGET_LONG_BITS=32
Therefore the target_ulong type always expands to uint32_t.
This is a mechanical replacement.
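For a 32-bit value of TARGET_LONG_BITS the two types are the same;
roughly (simplified from the generic headers):

  #if TARGET_LONG_BITS == 32
  typedef int32_t  target_long;
  typedef uint32_t target_ulong;
  #else
  typedef int64_t  target_long;
  typedef uint64_t target_ulong;
  #endif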
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-7-philmd@linaro.org>
---
target/tricore/op_helper.c | 219 +++++++++++++++++--------------------
1 file changed, 103 insertions(+), 116 deletions(-)
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index 9910c13f4b5..610f148a237 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -149,15 +149,15 @@ static uint32_t ssov32(CPUTriCoreState *env, int64_t arg)
if (arg > max_pos) {
env->PSW_USB_V = (1 << 31);
env->PSW_USB_SV = (1 << 31);
- ret = (target_ulong)max_pos;
+ ret = (uint32_t)max_pos;
} else {
if (arg < max_neg) {
env->PSW_USB_V = (1 << 31);
env->PSW_USB_SV = (1 << 31);
- ret = (target_ulong)max_neg;
+ ret = (uint32_t)max_neg;
} else {
env->PSW_USB_V = 0;
- ret = (target_ulong)arg;
+ ret = (uint32_t)arg;
}
}
env->PSW_USB_AV = arg ^ arg * 2u;
@@ -172,10 +172,10 @@ static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg)
if (arg > max_pos) {
env->PSW_USB_V = (1 << 31);
env->PSW_USB_SV = (1 << 31);
- ret = (target_ulong)max_pos;
+ ret = (uint32_t)max_pos;
} else {
env->PSW_USB_V = 0;
- ret = (target_ulong)arg;
+ ret = (uint32_t)arg;
}
env->PSW_USB_AV = arg ^ arg * 2u;
env->PSW_USB_SAV |= env->PSW_USB_AV;
@@ -192,7 +192,7 @@ static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg)
ret = 0;
} else {
env->PSW_USB_V = 0;
- ret = (target_ulong)arg;
+ ret = (uint32_t)arg;
}
env->PSW_USB_AV = arg ^ arg * 2u;
env->PSW_USB_SAV |= env->PSW_USB_AV;
@@ -260,8 +260,7 @@ static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1)
return (hw0 & 0xffff) | (hw1 << 16);
}
-target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -294,8 +293,7 @@ uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
return result;
}
-target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -397,8 +395,7 @@ uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
}
-target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
@@ -406,8 +403,7 @@ target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_pos(env, result);
}
-target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_add_h_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -416,8 +412,7 @@ target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1,
return suov16(env, ret_hw0, ret_hw1);
}
-target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -450,8 +445,7 @@ uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
return result;
}
-target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -552,8 +546,7 @@ uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
}
-target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
@@ -561,8 +554,7 @@ target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_neg(env, result);
}
-target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sub_h_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
@@ -571,8 +563,7 @@ target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1,
return suov16(env, ret_hw0, ret_hw1);
}
-target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_mul_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -580,8 +571,7 @@ target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_mul_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
@@ -590,8 +580,7 @@ target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_pos(env, result);
}
-target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_sha_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int32_t t2 = sextract64(r2, 0, 6);
@@ -606,14 +595,14 @@ target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1)
+uint32_t helper_abs_ssov(CPUTriCoreState *env, uint32_t r1)
{
- target_ulong result;
+ uint32_t result;
result = ((int32_t)r1 >= 0) ? r1 : (0 - r1);
return ssov32(env, result);
}
-uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1)
+uint32_t helper_abs_h_ssov(CPUTriCoreState *env, uint32_t r1)
{
int32_t ret_h0, ret_h1;
@@ -626,8 +615,7 @@ uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1)
return ssov16(env, ret_h0, ret_h1);
}
-target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_absdif_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -641,8 +629,7 @@ target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2)
+uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t t1, t2;
int32_t ret_h0, ret_h1;
@@ -666,8 +653,8 @@ uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov16(env, ret_h0, ret_h1);
}
-target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_madd32_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -678,8 +665,8 @@ target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_madd32_suov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
uint64_t t1 = extract64(r1, 0, 32);
uint64_t t2 = extract64(r2, 0, 32);
@@ -690,8 +677,8 @@ target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1,
return suov32_pos(env, result);
}
-uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_madd64_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, ovf;
int64_t t1 = sextract64(r1, 0, 32);
@@ -848,8 +835,8 @@ uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_madd64_suov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, mul;
uint64_t t1 = extract64(r1, 0, 32);
@@ -873,8 +860,8 @@ uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1,
return ret;
}
-target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_msub32_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
int64_t t1 = sextract64(r1, 0, 32);
int64_t t2 = sextract64(r2, 0, 32);
@@ -885,8 +872,8 @@ target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1,
return ssov32(env, result);
}
-target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1,
- target_ulong r2, target_ulong r3)
+uint32_t helper_msub32_suov(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
{
uint64_t t1 = extract64(r1, 0, 32);
uint64_t t2 = extract64(r2, 0, 32);
@@ -912,8 +899,8 @@ target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1,
return result;
}
-uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_msub64_ssov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, ovf;
int64_t t1 = sextract64(r1, 0, 32);
@@ -944,8 +931,8 @@ uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1,
return ret;
}
-uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1,
- uint64_t r2, target_ulong r3)
+uint64_t helper_msub64_suov(CPUTriCoreState *env, uint32_t r1,
+ uint64_t r2, uint32_t r3)
{
uint64_t ret, mul;
uint64_t t1 = extract64(r1, 0, 32);
@@ -1097,7 +1084,7 @@ uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg)
+uint32_t helper_abs_b(CPUTriCoreState *env, uint32_t arg)
{
int32_t b, i;
int32_t ovf = 0;
@@ -1120,7 +1107,7 @@ uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg)
return ret;
}
-uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg)
+uint32_t helper_abs_h(CPUTriCoreState *env, uint32_t arg)
{
int32_t h, i;
int32_t ovf = 0;
@@ -1143,7 +1130,7 @@ uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg)
return ret;
}
-uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_absdif_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t b, i;
int32_t extr_r2;
@@ -1167,7 +1154,7 @@ uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_absdif_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t h, i;
int32_t extr_r2;
@@ -1296,7 +1283,7 @@ uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_add_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t b, i;
int32_t extr_r1, extr_r2;
@@ -1322,7 +1309,7 @@ uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_add_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t h, i;
int32_t extr_r1, extr_r2;
@@ -1451,7 +1438,7 @@ uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
return ret & 0xffff0000ll;
}
-uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_sub_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t b, i;
int32_t extr_r1, extr_r2;
@@ -1477,7 +1464,7 @@ uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_sub_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t h, i;
int32_t extr_r1, extr_r2;
@@ -1502,7 +1489,7 @@ uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eq_b(target_ulong r1, target_ulong r2)
+uint32_t helper_eq_b(uint32_t r1, uint32_t r2)
{
uint32_t ret, msk;
int32_t i;
@@ -1519,7 +1506,7 @@ uint32_t helper_eq_b(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eq_h(target_ulong r1, target_ulong r2)
+uint32_t helper_eq_h(uint32_t r1, uint32_t r2)
{
int32_t ret = 0;
@@ -1534,7 +1521,7 @@ uint32_t helper_eq_h(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eqany_b(target_ulong r1, target_ulong r2)
+uint32_t helper_eqany_b(uint32_t r1, uint32_t r2)
{
int32_t i;
uint32_t ret = 0;
@@ -1546,7 +1533,7 @@ uint32_t helper_eqany_b(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_eqany_h(target_ulong r1, target_ulong r2)
+uint32_t helper_eqany_h(uint32_t r1, uint32_t r2)
{
uint32_t ret;
@@ -1556,7 +1543,7 @@ uint32_t helper_eqany_h(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_b(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_b(uint32_t r1, uint32_t r2)
{
int32_t i;
uint32_t ret = 0;
@@ -1570,7 +1557,7 @@ uint32_t helper_lt_b(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_bu(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_bu(uint32_t r1, uint32_t r2)
{
int32_t i;
uint32_t ret = 0;
@@ -1584,7 +1571,7 @@ uint32_t helper_lt_bu(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_h(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_h(uint32_t r1, uint32_t r2)
{
uint32_t ret = 0;
@@ -1599,7 +1586,7 @@ uint32_t helper_lt_h(target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_lt_hu(target_ulong r1, target_ulong r2)
+uint32_t helper_lt_hu(uint32_t r1, uint32_t r2)
{
uint32_t ret = 0;
@@ -1615,7 +1602,7 @@ uint32_t helper_lt_hu(target_ulong r1, target_ulong r2)
}
#define EXTREMA_H_B(name, op) \
-uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \
+uint32_t helper_##name ##_b(uint32_t r1, uint32_t r2) \
{ \
int32_t i, extr_r1, extr_r2; \
uint32_t ret = 0; \
@@ -1629,7 +1616,7 @@ uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \
return ret; \
} \
\
-uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\
+uint32_t helper_##name ##_bu(uint32_t r1, uint32_t r2) \
{ \
int32_t i; \
uint32_t extr_r1, extr_r2; \
@@ -1644,7 +1631,7 @@ uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\
return ret; \
} \
\
-uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \
+uint32_t helper_##name ##_h(uint32_t r1, uint32_t r2) \
{ \
int32_t extr_r1, extr_r2; \
uint32_t ret = 0; \
@@ -1662,7 +1649,7 @@ uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \
return ret; \
} \
\
-uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\
+uint32_t helper_##name ##_hu(uint32_t r1, uint32_t r2) \
{ \
uint32_t extr_r1, extr_r2; \
uint32_t ret = 0; \
@@ -1729,7 +1716,7 @@ EXTREMA_H_B(min, <)
#undef EXTREMA_H_B
-uint32_t helper_clo_h(target_ulong r1)
+uint32_t helper_clo_h(uint32_t r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
uint32_t ret_hw1 = extract32(r1, 16, 16);
@@ -1747,7 +1734,7 @@ uint32_t helper_clo_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_clz_h(target_ulong r1)
+uint32_t helper_clz_h(uint32_t r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
uint32_t ret_hw1 = extract32(r1, 16, 16);
@@ -1765,7 +1752,7 @@ uint32_t helper_clz_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_cls_h(target_ulong r1)
+uint32_t helper_cls_h(uint32_t r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
uint32_t ret_hw1 = extract32(r1, 16, 16);
@@ -1783,7 +1770,7 @@ uint32_t helper_cls_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_sh(target_ulong r1, target_ulong r2)
+uint32_t helper_sh(uint32_t r1, uint32_t r2)
{
int32_t shift_count = sextract32(r2, 0, 6);
@@ -1796,7 +1783,7 @@ uint32_t helper_sh(target_ulong r1, target_ulong r2)
}
}
-uint32_t helper_sh_h(target_ulong r1, target_ulong r2)
+uint32_t helper_sh_h(uint32_t r1, uint32_t r2)
{
int32_t ret_hw0, ret_hw1;
int32_t shift_count;
@@ -1816,7 +1803,7 @@ uint32_t helper_sh_h(target_ulong r1, target_ulong r2)
}
}
-uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+uint32_t helper_sha(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
int32_t shift_count;
int64_t result, t1;
@@ -1854,7 +1841,7 @@ uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
return ret;
}
-uint32_t helper_sha_h(target_ulong r1, target_ulong r2)
+uint32_t helper_sha_h(uint32_t r1, uint32_t r2)
{
int32_t shift_count;
int32_t ret_hw0, ret_hw1;
@@ -1874,7 +1861,7 @@ uint32_t helper_sha_h(target_ulong r1, target_ulong r2)
}
}
-uint32_t helper_bmerge(target_ulong r1, target_ulong r2)
+uint32_t helper_bmerge(uint32_t r1, uint32_t r2)
{
uint32_t i, ret;
@@ -1905,7 +1892,7 @@ uint64_t helper_bsplit(uint32_t r1)
return ret;
}
-uint32_t helper_parity(target_ulong r1)
+uint32_t helper_parity(uint32_t r1)
{
uint32_t ret;
uint32_t nOnes, i;
@@ -1942,7 +1929,7 @@ uint32_t helper_parity(target_ulong r1)
}
uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high,
- target_ulong r2)
+ uint32_t r2)
{
uint32_t ret;
int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac;
@@ -1983,7 +1970,7 @@ uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high,
return ret;
}
-uint64_t helper_unpack(target_ulong arg1)
+uint64_t helper_unpack(uint32_t arg1)
{
int32_t fp_exp = extract32(arg1, 23, 8);
int32_t fp_frac = extract32(arg1, 0, 23);
@@ -2408,7 +2395,7 @@ uint32_t helper_shuffle(uint32_t arg0, uint32_t arg1)
/* context save area (CSA) related helpers */
-static int cdc_increment(target_ulong *psw)
+static int cdc_increment(uint32_t *psw)
{
if ((*psw & MASK_PSW_CDC) == 0x7f) {
return 0;
@@ -2426,7 +2413,7 @@ static int cdc_increment(target_ulong *psw)
return 0;
}
-static int cdc_decrement(target_ulong *psw)
+static int cdc_decrement(uint32_t *psw)
{
if ((*psw & MASK_PSW_CDC) == 0x7f) {
return 0;
@@ -2442,7 +2429,7 @@ static int cdc_decrement(target_ulong *psw)
return 0;
}
-static bool cdc_zero(target_ulong *psw)
+static bool cdc_zero(uint32_t *psw)
{
int cdc = *psw & MASK_PSW_CDC;
/* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC ==
@@ -2457,7 +2444,7 @@ static bool cdc_zero(target_ulong *psw)
return count == 0;
}
-static void save_context_upper(CPUTriCoreState *env, target_ulong ea)
+static void save_context_upper(CPUTriCoreState *env, uint32_t ea)
{
cpu_stl_data(env, ea, env->PCXI);
cpu_stl_data(env, ea+4, psw_read(env));
@@ -2477,7 +2464,7 @@ static void save_context_upper(CPUTriCoreState *env, target_ulong ea)
cpu_stl_data(env, ea+60, env->gpr_d[15]);
}
-static void save_context_lower(CPUTriCoreState *env, target_ulong ea)
+static void save_context_lower(CPUTriCoreState *env, uint32_t ea)
{
cpu_stl_data(env, ea, env->PCXI);
cpu_stl_data(env, ea+4, env->gpr_a[11]);
@@ -2497,8 +2484,8 @@ static void save_context_lower(CPUTriCoreState *env, target_ulong ea)
cpu_stl_data(env, ea+60, env->gpr_d[7]);
}
-static void restore_context_upper(CPUTriCoreState *env, target_ulong ea,
- target_ulong *new_PCXI, target_ulong *new_PSW)
+static void restore_context_upper(CPUTriCoreState *env, uint32_t ea,
+ uint32_t *new_PCXI, uint32_t *new_PSW)
{
*new_PCXI = cpu_ldl_data(env, ea);
*new_PSW = cpu_ldl_data(env, ea+4);
@@ -2518,8 +2505,8 @@ static void restore_context_upper(CPUTriCoreState *env, target_ulong ea,
env->gpr_d[15] = cpu_ldl_data(env, ea+60);
}
-static void restore_context_lower(CPUTriCoreState *env, target_ulong ea,
- target_ulong *ra, target_ulong *pcxi)
+static void restore_context_lower(CPUTriCoreState *env, uint32_t ea,
+ uint32_t *ra, uint32_t *pcxi)
{
*pcxi = cpu_ldl_data(env, ea);
*ra = cpu_ldl_data(env, ea+4);
@@ -2541,10 +2528,10 @@ static void restore_context_lower(CPUTriCoreState *env, target_ulong ea,
void helper_call(CPUTriCoreState *env, uint32_t next_pc)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
- target_ulong psw;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
+ uint32_t psw;
psw = psw_read(env);
/* if (FCX == 0) trap(FCU); */
@@ -2604,9 +2591,9 @@ void helper_call(CPUTriCoreState *env, uint32_t next_pc)
void helper_ret(CPUTriCoreState *env)
{
- target_ulong ea;
- target_ulong new_PCXI;
- target_ulong new_PSW, psw;
+ uint32_t ea;
+ uint32_t new_PCXI;
+ uint32_t new_PSW, psw;
psw = psw_read(env);
/* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/
@@ -2657,9 +2644,9 @@ void helper_ret(CPUTriCoreState *env)
void helper_bisr(CPUTriCoreState *env, uint32_t const9)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
if (env->FCX == 0) {
/* FCU trap */
@@ -2701,9 +2688,9 @@ void helper_bisr(CPUTriCoreState *env, uint32_t const9)
void helper_rfe(CPUTriCoreState *env)
{
- target_ulong ea;
- target_ulong new_PCXI;
- target_ulong new_PSW;
+ uint32_t ea;
+ uint32_t new_PCXI;
+ uint32_t new_PSW;
/* if (PCXI[19: 0] == 0) then trap(CSU); */
if ((env->PCXI & 0xfffff) == 0) {
/* raise csu trap */
@@ -2762,35 +2749,35 @@ void helper_rfm(CPUTriCoreState *env)
}
}
-void helper_ldlcx(CPUTriCoreState *env, target_ulong ea)
+void helper_ldlcx(CPUTriCoreState *env, uint32_t ea)
{
uint32_t dummy;
/* insn doesn't load PCXI and RA */
restore_context_lower(env, ea, &dummy, &dummy);
}
-void helper_lducx(CPUTriCoreState *env, target_ulong ea)
+void helper_lducx(CPUTriCoreState *env, uint32_t ea)
{
uint32_t dummy;
/* insn doesn't load PCXI and PSW */
restore_context_upper(env, ea, &dummy, &dummy);
}
-void helper_stlcx(CPUTriCoreState *env, target_ulong ea)
+void helper_stlcx(CPUTriCoreState *env, uint32_t ea)
{
save_context_lower(env, ea);
}
-void helper_stucx(CPUTriCoreState *env, target_ulong ea)
+void helper_stucx(CPUTriCoreState *env, uint32_t ea)
{
save_context_upper(env, ea);
}
void helper_svlcx(CPUTriCoreState *env)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
if (env->FCX == 0) {
/* FCU trap */
@@ -2831,9 +2818,9 @@ void helper_svlcx(CPUTriCoreState *env)
void helper_svucx(CPUTriCoreState *env)
{
- target_ulong tmp_FCX;
- target_ulong ea;
- target_ulong new_FCX;
+ uint32_t tmp_FCX;
+ uint32_t ea;
+ uint32_t new_FCX;
if (env->FCX == 0) {
/* FCU trap */
@@ -2874,8 +2861,8 @@ void helper_svucx(CPUTriCoreState *env)
void helper_rslcx(CPUTriCoreState *env)
{
- target_ulong ea;
- target_ulong new_PCXI;
+ uint32_t ea;
+ uint32_t new_PCXI;
/* if (PCXI[19: 0] == 0) then trap(CSU); */
if ((env->PCXI & 0xfffff) == 0) {
/* CSU trap */
--
2.51.0
* [PULL 65/75] target/tricore: Declare registers as TCGv_i32
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (63 preceding siblings ...)
2025-10-16 12:15 ` [PULL 64/75] target/tricore: Replace target_ulong -> uint32_t in op_helper.c Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 66/75] target/tricore: Inline tcg_gen_ld32u_tl() Philippe Mathieu-Daudé
` (9 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
CPUTriCoreState registers are declared as uint32_t since the
target's introduction in commit 48e06fe0ed8 ("target-tricore:
Add target stubs and qom-cpu").
Mechanical replacement of:
TCGv -> TCGv_i32
tcg_temp_new -> tcg_temp_new_i32
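This is valid because, on a 32-bit target, the generic names are plain
aliases of the _i32 variants; roughly (simplified sketch):

  #if TARGET_LONG_BITS == 32
  #define TCGv                TCGv_i32
  #define tcg_temp_new()      tcg_temp_new_i32()
  #define tcg_global_mem_new  tcg_global_mem_new_i32
  #endif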
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-8-philmd@linaro.org>
---
target/tricore/translate.c | 82 +++++++++++++++++++-------------------
1 file changed, 42 insertions(+), 40 deletions(-)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 116f45135bb..112a1b9a12e 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -44,19 +44,19 @@
/*
* TCG registers
*/
-static TCGv cpu_PC;
-static TCGv cpu_PCXI;
-static TCGv cpu_PSW;
-static TCGv cpu_ICR;
+static TCGv_i32 cpu_PC;
+static TCGv_i32 cpu_PCXI;
+static TCGv_i32 cpu_PSW;
+static TCGv_i32 cpu_ICR;
/* GPR registers */
-static TCGv cpu_gpr_a[16];
-static TCGv cpu_gpr_d[16];
+static TCGv_i32 cpu_gpr_a[16];
+static TCGv_i32 cpu_gpr_d[16];
/* PSW Flag cache */
-static TCGv cpu_PSW_C;
-static TCGv cpu_PSW_V;
-static TCGv cpu_PSW_SV;
-static TCGv cpu_PSW_AV;
-static TCGv cpu_PSW_SAV;
+static TCGv_i32 cpu_PSW_C;
+static TCGv_i32 cpu_PSW_V;
+static TCGv_i32 cpu_PSW_SV;
+static TCGv_i32 cpu_PSW_AV;
+static TCGv_i32 cpu_PSW_SAV;
static const char *regnames_a[] = {
"a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
@@ -8480,14 +8480,14 @@ void cpu_state_reset(CPUTriCoreState *env)
static void tricore_tcg_init_csfr(void)
{
- cpu_PCXI = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PCXI), "PCXI");
- cpu_PSW = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW), "PSW");
- cpu_PC = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PC), "PC");
- cpu_ICR = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, ICR), "ICR");
+ cpu_PCXI = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PCXI), "PCXI");
+ cpu_PSW = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW), "PSW");
+ cpu_PC = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PC), "PC");
+ cpu_ICR = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, ICR), "ICR");
}
void tricore_tcg_init(void)
@@ -8496,30 +8496,32 @@ void tricore_tcg_init(void)
/* reg init */
for (i = 0 ; i < 16 ; i++) {
- cpu_gpr_a[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, gpr_a[i]),
- regnames_a[i]);
+ cpu_gpr_a[i] = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState,
+ gpr_a[i]),
+ regnames_a[i]);
}
for (i = 0 ; i < 16 ; i++) {
- cpu_gpr_d[i] = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, gpr_d[i]),
- regnames_d[i]);
+ cpu_gpr_d[i] = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState,
+ gpr_d[i]),
+ regnames_d[i]);
}
tricore_tcg_init_csfr();
/* init PSW flag cache */
- cpu_PSW_C = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_C),
- "PSW_C");
- cpu_PSW_V = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_V),
- "PSW_V");
- cpu_PSW_SV = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_SV),
- "PSW_SV");
- cpu_PSW_AV = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_AV),
- "PSW_AV");
- cpu_PSW_SAV = tcg_global_mem_new(tcg_env,
- offsetof(CPUTriCoreState, PSW_USB_SAV),
- "PSW_SAV");
+ cpu_PSW_C = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_C),
+ "PSW_C");
+ cpu_PSW_V = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_V),
+ "PSW_V");
+ cpu_PSW_SV = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_SV),
+ "PSW_SV");
+ cpu_PSW_AV = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_AV),
+ "PSW_AV");
+ cpu_PSW_SAV = tcg_global_mem_new_i32(tcg_env,
+ offsetof(CPUTriCoreState, PSW_USB_SAV),
+ "PSW_SAV");
}
--
2.51.0
* [PULL 66/75] target/tricore: Inline tcg_gen_ld32u_tl()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (64 preceding siblings ...)
2025-10-16 12:15 ` [PULL 65/75] target/tricore: Declare registers as TCGv_i32 Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 67/75] target/tricore: Expand TCG helpers for 32-bit target Philippe Mathieu-Daudé
` (8 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The TriCore target is only built as 32-bit, so tcg_gen_ld32u_tl()
expands to tcg_gen_ld_i32(). Use the latter to simplify the next
commit's mechanical change.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-9-philmd@linaro.org>
---
target/tricore/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 112a1b9a12e..2cfd2e5b8df 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -7959,7 +7959,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
tmp = tcg_temp_new();
l1 = gen_new_label();
- tcg_gen_ld32u_tl(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
+ tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
gen_helper_rfm(tcg_env);
--
2.51.0
* [PULL 67/75] target/tricore: Expand TCG helpers for 32-bit target
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (65 preceding siblings ...)
2025-10-16 12:15 ` [PULL 66/75] target/tricore: Inline tcg_gen_ld32u_tl() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 68/75] target/tricore: Pass DisasContext as first argument Philippe Mathieu-Daudé
` (7 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The TriCore target is only built as 32-bit:
$ git grep TARGET_LONG_BITS configs/targets/tricore-*
configs/targets/tricore-softmmu.mak:2:TARGET_LONG_BITS=32
Therefore tcg_FOO_tl() always expands to tcg_FOO_i32().
This is a mechanical replacement.
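The _tl layer is only a thin alias layer; on a 32-bit target it boils down
to something like (simplified sketch, a few representative lines):

  #if TARGET_LONG_BITS == 32
  #define tcg_gen_mov_tl      tcg_gen_mov_i32
  #define tcg_gen_add_tl      tcg_gen_add_i32
  #define tcg_gen_andi_tl     tcg_gen_andi_i32
  #define tcg_gen_qemu_ld_tl  tcg_gen_qemu_ld_i32
  #endif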
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-10-philmd@linaro.org>
---
target/tricore/translate.c | 2595 ++++++++++++++++++------------------
1 file changed, 1298 insertions(+), 1297 deletions(-)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 2cfd2e5b8df..ef3653b8820 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -139,9 +139,9 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
TCGv arg00 = tcg_temp_new(); \
TCGv arg01 = tcg_temp_new(); \
TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg00, arg0, 16); \
- tcg_gen_ext16s_tl(arg01, arg0); \
- tcg_gen_ext16s_tl(arg11, arg1); \
+ tcg_gen_sari_i32(arg00, arg0, 16); \
+ tcg_gen_ext16s_i32(arg01, arg0); \
+ tcg_gen_ext16s_i32(arg11, arg1); \
gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
} while (0)
@@ -150,10 +150,10 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
TCGv arg01 = tcg_temp_new(); \
TCGv arg10 = tcg_temp_new(); \
TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg00, arg0, 16); \
- tcg_gen_ext16s_tl(arg01, arg0); \
- tcg_gen_sari_tl(arg11, arg1, 16); \
- tcg_gen_ext16s_tl(arg10, arg1); \
+ tcg_gen_sari_i32(arg00, arg0, 16); \
+ tcg_gen_ext16s_i32(arg01, arg0); \
+ tcg_gen_sari_i32(arg11, arg1, 16); \
+ tcg_gen_ext16s_i32(arg10, arg1); \
gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
} while (0)
@@ -162,10 +162,10 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
TCGv arg01 = tcg_temp_new(); \
TCGv arg10 = tcg_temp_new(); \
TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg00, arg0, 16); \
- tcg_gen_ext16s_tl(arg01, arg0); \
- tcg_gen_sari_tl(arg10, arg1, 16); \
- tcg_gen_ext16s_tl(arg11, arg1); \
+ tcg_gen_sari_i32(arg00, arg0, 16); \
+ tcg_gen_ext16s_i32(arg01, arg0); \
+ tcg_gen_sari_i32(arg10, arg1, 16); \
+ tcg_gen_ext16s_i32(arg11, arg1); \
gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
} while (0)
@@ -173,9 +173,9 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
TCGv arg00 = tcg_temp_new(); \
TCGv arg01 = tcg_temp_new(); \
TCGv arg11 = tcg_temp_new(); \
- tcg_gen_sari_tl(arg01, arg0, 16); \
- tcg_gen_ext16s_tl(arg00, arg0); \
- tcg_gen_sari_tl(arg11, arg1, 16); \
+ tcg_gen_sari_i32(arg01, arg0, 16); \
+ tcg_gen_ext16s_i32(arg00, arg0); \
+ tcg_gen_sari_i32(arg11, arg1, 16); \
gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
} while (0)
@@ -214,16 +214,16 @@ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
int16_t con, MemOp mop)
{
TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, con);
- tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_addi_i32(temp, r2, con);
+ tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop);
}
static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
int16_t con, MemOp mop)
{
TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, con);
- tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_addi_i32(temp, r2, con);
+ tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop);
}
static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
@@ -238,7 +238,7 @@ static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
DisasContext *ctx)
{
TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, base, con);
+ tcg_gen_addi_i32(temp, base, con);
gen_st_2regs_64(rh, rl, temp, ctx);
}
@@ -255,7 +255,7 @@ static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
DisasContext *ctx)
{
TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, base, con);
+ tcg_gen_addi_i32(temp, base, con);
gen_ld_2regs_64(rh, rl, temp, ctx);
}
@@ -263,18 +263,18 @@ static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
MemOp mop)
{
TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, off);
- tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
- tcg_gen_mov_tl(r2, temp);
+ tcg_gen_addi_i32(temp, r2, off);
+ tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_i32(r2, temp);
}
static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
MemOp mop)
{
TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, r2, off);
- tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
- tcg_gen_mov_tl(r2, temp);
+ tcg_gen_addi_i32(temp, r2, off);
+ tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_i32(r2, temp);
}
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
@@ -285,15 +285,15 @@ static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
CHECK_REG_PAIR(ereg);
/* temp = (M(EA, word) */
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
/* temp = temp & ~E[a][63:32]) */
- tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
+ tcg_gen_andc_i32(temp, temp, cpu_gpr_d[ereg + 1]);
/* temp2 = (E[a][31:0] & E[a][63:32]); */
- tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
+ tcg_gen_and_i32(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg + 1]);
/* temp = temp | temp2; */
- tcg_gen_or_tl(temp, temp, temp2);
+ tcg_gen_or_i32(temp, temp, temp2);
/* M(EA, word) = temp; */
- tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(temp, ea, ctx->mem_idx, MO_LEUL);
}
/* tmp = M(EA, word);
@@ -303,9 +303,9 @@ static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
{
TCGv temp = tcg_temp_new();
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
@@ -313,11 +313,11 @@ static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
CHECK_REG_PAIR(reg);
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_movcond_i32(TCG_COND_EQ, temp2, cpu_gpr_d[reg + 1], temp,
cpu_gpr_d[reg], temp);
- tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_st_i32(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
@@ -326,12 +326,12 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
CHECK_REG_PAIR(reg);
- tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
- tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
- tcg_gen_or_tl(temp2, temp2, temp3);
- tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
- tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_and_i32(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg + 1]);
+ tcg_gen_andc_i32(temp3, temp, cpu_gpr_d[reg + 1]);
+ tcg_gen_or_i32(temp2, temp2, temp3);
+ tcg_gen_qemu_st_i32(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
/* We generate loads and store to core special function register (csfr) through
@@ -341,7 +341,7 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
#define R(ADDRESS, REG, FEATURE) \
case ADDRESS: \
if (has_feature(ctx, FEATURE)) { \
- tcg_gen_ld_tl(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \
+ tcg_gen_ld_i32(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \
} \
break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
@@ -366,7 +366,7 @@ static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
case ADDRESS: \
if (has_feature(ctx, FEATURE)) { \
- tcg_gen_st_tl(r1, tcg_env, offsetof(CPUTriCoreState, REG)); \
+ tcg_gen_st_i32(r1, tcg_env, offsetof(CPUTriCoreState, REG)); \
} \
break;
/* Endinit protected registers
@@ -399,20 +399,20 @@ static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
/* Addition and set V/SV bits */
- tcg_gen_add_tl(result, r1, r2);
+ tcg_gen_add_i32(result, r1, r2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(t0, r1, r2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(t0, r1, r2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
static inline void
@@ -430,13 +430,13 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_andc_i64(t1, t1, t0);
tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
tcg_gen_extrh_i64_i32(temp, result);
- tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
- tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
tcg_gen_mov_i64(ret, result);
}
@@ -453,38 +453,38 @@ gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
(*op1)(temp, r1_low, r2);
/* calc V0 bit */
- tcg_gen_xor_tl(temp2, temp, r1_low);
- tcg_gen_xor_tl(temp3, r1_low, r2);
+ tcg_gen_xor_i32(temp2, temp, r1_low);
+ tcg_gen_xor_i32(temp3, r1_low, r2);
if (op1 == tcg_gen_add_tl) {
- tcg_gen_andc_tl(temp2, temp2, temp3);
+ tcg_gen_andc_i32(temp2, temp2, temp3);
} else {
- tcg_gen_and_tl(temp2, temp2, temp3);
+ tcg_gen_and_i32(temp2, temp2, temp3);
}
(*op2)(temp3, r1_high, r3);
/* calc V1 bit */
- tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
- tcg_gen_xor_tl(temp4, r1_high, r3);
+ tcg_gen_xor_i32(cpu_PSW_V, temp3, r1_high);
+ tcg_gen_xor_i32(temp4, r1_high, r3);
if (op2 == tcg_gen_add_tl) {
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, temp4);
} else {
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp4);
}
/* combine V0/V1 bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp2);
/* calc sv bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* write result */
- tcg_gen_mov_tl(ret_low, temp);
- tcg_gen_mov_tl(ret_high, temp3);
+ tcg_gen_mov_i32(ret_low, temp);
+ tcg_gen_mov_i32(ret_high, temp3);
/* calc AV bit */
- tcg_gen_add_tl(temp, ret_low, ret_low);
- tcg_gen_xor_tl(temp, temp, ret_low);
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ tcg_gen_add_i32(temp, ret_low, ret_low);
+ tcg_gen_xor_i32(temp, temp, ret_low);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, cpu_PSW_AV, ret_high);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/* ret = r2 + (r1 * r3); */
@@ -509,14 +509,14 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
@@ -534,23 +534,23 @@ gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv t3 = tcg_temp_new();
TCGv t4 = tcg_temp_new();
- tcg_gen_muls2_tl(t1, t2, r1, r3);
+ tcg_gen_muls2_i32(t1, t2, r1, r3);
/* only the add can overflow */
- tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ tcg_gen_add2_i32(t3, t4, r2_low, r2_high, t1, t2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
- tcg_gen_xor_tl(t1, r2_high, t2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ tcg_gen_xor_i32(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_i32(t1, r2_high, t2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t1);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
- tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_i32(cpu_PSW_AV, t4, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back the result */
- tcg_gen_mov_tl(ret_low, t3);
- tcg_gen_mov_tl(ret_high, t4);
+ tcg_gen_mov_i32(ret_low, t3);
+ tcg_gen_mov_i32(ret_high, t4);
}
static inline void
@@ -573,14 +573,14 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
calc V bit */
tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void
@@ -714,13 +714,13 @@ gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_adds(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_adds(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
@@ -751,13 +751,13 @@ gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_subs(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_adds(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void
@@ -876,8 +876,8 @@ gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
}
@@ -902,8 +902,8 @@ gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_addsur_h(ret, tcg_env, temp64, temp, temp2);
}
@@ -937,8 +937,8 @@ gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
@@ -963,8 +963,8 @@ gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_addsur_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
@@ -1009,27 +1009,27 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* We produce an overflow on the host if the mul before was
((0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
if (n == 1) {
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_shli_tl(temp, temp, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_shli_i32(temp, temp, 31);
/* negate v bit, if special condition */
- tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp);
}
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
- tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp3, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, temp3);
+ tcg_gen_mov_i32(ret, temp3);
}
static inline void
@@ -1038,13 +1038,13 @@ gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_add_d(ret, arg1, temp);
}
@@ -1055,13 +1055,13 @@ gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_adds(ret, arg1, temp);
}
@@ -1077,13 +1077,13 @@ gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv_i64 t3 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1103,13 +1103,13 @@ gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv_i64 t2 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1149,22 +1149,22 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
if (n == 1) {
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_shli_tl(temp, temp, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_shli_i32(temp, temp, 31);
/* negate v bit, if special condition */
- tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp);
}
/* write back result */
tcg_gen_extr_i64_i32(rl, rh, t4);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
- tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void
@@ -1219,15 +1219,15 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
@@ -1245,23 +1245,23 @@ gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv t3 = tcg_temp_new();
TCGv t4 = tcg_temp_new();
- tcg_gen_muls2_tl(t1, t2, r1, r3);
+ tcg_gen_muls2_i32(t1, t2, r1, r3);
/* only the sub can overflow */
- tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ tcg_gen_sub2_i32(t3, t4, r2_low, r2_high, t1, t2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
- tcg_gen_xor_tl(t1, r2_high, t2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ tcg_gen_xor_i32(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_i32(t1, r2_high, t2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, t1);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
- tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_i32(cpu_PSW_AV, t4, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back the result */
- tcg_gen_mov_tl(ret_low, t3);
- tcg_gen_mov_tl(ret_high, t4);
+ tcg_gen_mov_i32(ret_low, t3);
+ tcg_gen_mov_i32(ret_high, t4);
}
static inline void
@@ -1290,14 +1290,14 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
/* calc V bit, only the sub can overflow, if t1 > t2 */
tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void
@@ -1320,22 +1320,22 @@ static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
- tcg_gen_movi_tl(t0, 0);
+ tcg_gen_movi_i32(t0, 0);
/* Addition and set C/V/SV bits */
tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(t0, r1, r2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(t0, r1, r2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
@@ -1352,18 +1352,18 @@ static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
/* Addition, carry and set C/V/SV bits */
tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(t0, r1, r2);
- tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(t0, r1, r2);
+ tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
@@ -1382,27 +1382,27 @@ static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
TCGv t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
- tcg_gen_setcond_tl(cond, mask, r4, t0);
- tcg_gen_shli_tl(mask, mask, 31);
+ tcg_gen_setcond_i32(cond, mask, r4, t0);
+ tcg_gen_shli_i32(mask, mask, 31);
- tcg_gen_add_tl(result, r1, r2);
+ tcg_gen_add_i32(result, r1, r2);
/* Calc PSW_V */
- tcg_gen_xor_tl(temp, result, r1);
- tcg_gen_xor_tl(temp2, r1, r2);
- tcg_gen_andc_tl(temp, temp, temp2);
- tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ tcg_gen_xor_i32(temp, result, r1);
+ tcg_gen_xor_i32(temp2, r1, r2);
+ tcg_gen_andc_i32(temp, temp, temp2);
+ tcg_gen_movcond_i32(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
/* Set PSW_SV */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SV, temp, cpu_PSW_SV);
/* calc AV bit */
- tcg_gen_add_tl(temp, result, result);
- tcg_gen_xor_tl(temp, temp, result);
- tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(temp, result, result);
+ tcg_gen_xor_i32(temp, temp, result);
+ tcg_gen_movcond_i32(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SAV, temp, cpu_PSW_SAV);
/* write back result */
- tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+ tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
@@ -1417,20 +1417,20 @@ static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
TCGv temp = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
- tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_i32(result, r1, r2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(temp, r1, r2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(temp, r1, r2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
static inline void
@@ -1448,13 +1448,13 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_and_i64(t1, t1, t0);
tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
tcg_gen_extrh_i64_i32(temp, result);
- tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
- tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
tcg_gen_mov_i64(ret, result);
}
@@ -1464,28 +1464,28 @@ static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
TCGv result = tcg_temp_new();
TCGv temp = tcg_temp_new();
- tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_i32(result, r1, r2);
/* calc C bit */
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_PSW_C, r1, r2);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(temp, r1, r2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(temp, r1, r2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
{
TCGv temp = tcg_temp_new();
- tcg_gen_not_tl(temp, r2);
+ tcg_gen_not_i32(temp, r2);
gen_addc_CC(ret, r1, temp);
}
@@ -1499,27 +1499,27 @@ static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
TCGv t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
- tcg_gen_setcond_tl(cond, mask, r4, t0);
- tcg_gen_shli_tl(mask, mask, 31);
+ tcg_gen_setcond_i32(cond, mask, r4, t0);
+ tcg_gen_shli_i32(mask, mask, 31);
- tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_i32(result, r1, r2);
/* Calc PSW_V */
- tcg_gen_xor_tl(temp, result, r1);
- tcg_gen_xor_tl(temp2, r1, r2);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ tcg_gen_xor_i32(temp, result, r1);
+ tcg_gen_xor_i32(temp2, r1, r2);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_movcond_i32(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
/* Set PSW_SV */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SV, temp, cpu_PSW_SV);
/* calc AV bit */
- tcg_gen_add_tl(temp, result, result);
- tcg_gen_xor_tl(temp, temp, result);
- tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ tcg_gen_add_i32(temp, result, result);
+ tcg_gen_xor_i32(temp, temp, result);
+ tcg_gen_movcond_i32(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_or_i32(cpu_PSW_SAV, temp, cpu_PSW_SAV);
/* write back result */
- tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+ tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
static inline void
@@ -1575,13 +1575,13 @@ gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_subs(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_subs(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void
@@ -1667,8 +1667,8 @@ gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
}
@@ -1701,8 +1701,8 @@ gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
@@ -1749,16 +1749,16 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
- tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_i32(cpu_PSW_AV, temp3, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, temp3);
+ tcg_gen_mov_i32(ret, temp3);
}
static inline void
@@ -1767,13 +1767,13 @@ gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_sub_d(ret, arg1, temp);
}
@@ -1784,13 +1784,13 @@ gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
gen_subs(ret, arg1, temp);
}
@@ -1806,13 +1806,13 @@ gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv_i64 t3 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1832,13 +1832,13 @@ gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv_i64 t2 = tcg_temp_new_i64();
if (n == 0) {
- tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(temp, arg2, arg3);
- tcg_gen_shli_tl(temp, temp, 1);
+ tcg_gen_mul_i32(temp, arg2, arg3);
+ tcg_gen_shli_i32(temp, temp, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
- tcg_gen_sub_tl(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_i32(temp, temp, temp2);
}
tcg_gen_ext_i32_i64(t2, temp);
tcg_gen_shli_i64(t2, t2, 16);
@@ -1878,22 +1878,22 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
if (n == 1) {
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_shli_tl(temp, temp, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_shli_i32(temp, temp, 31);
/* negate v bit, if special condition */
- tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp);
}
/* write back result */
tcg_gen_extr_i64_i32(rl, rh, t4);
/* Calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
- tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
- tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV);
/* calc SAV */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void
@@ -2012,8 +2012,8 @@ gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_subadr_h(ret, tcg_env, temp64, temp, temp2);
}
@@ -2043,13 +2043,13 @@ gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
gen_adds(ret_low, r1_low, temp);
- tcg_gen_mov_tl(temp, cpu_PSW_V);
- tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ tcg_gen_mov_i32(temp, cpu_PSW_V);
+ tcg_gen_mov_i32(temp3, cpu_PSW_AV);
gen_subs(ret_high, r1_high, temp2);
/* combine v bits */
- tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* combine av bits */
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void
@@ -2105,24 +2105,24 @@ gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- tcg_gen_andi_tl(temp2, r1, 0xffff0000);
- tcg_gen_shli_tl(temp, r1, 16);
+ tcg_gen_andi_i32(temp2, r1, 0xffff0000);
+ tcg_gen_shli_i32(temp, r1, 16);
gen_helper_subadr_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
static inline void gen_abs(TCGv ret, TCGv r1)
{
- tcg_gen_abs_tl(ret, r1);
+ tcg_gen_abs_i32(ret, r1);
/* overflow can only happen, if r1 = 0x80000000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
@@ -2130,25 +2130,25 @@ static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
TCGv temp = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
- tcg_gen_sub_tl(result, r1, r2);
- tcg_gen_sub_tl(temp, r2, r1);
- tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
+ tcg_gen_sub_i32(result, r1, r2);
+ tcg_gen_sub_i32(temp, r2, r1);
+ tcg_gen_movcond_i32(TCG_COND_GT, result, r1, r2, result, temp);
/* calc V bit */
- tcg_gen_xor_tl(cpu_PSW_V, result, r1);
- tcg_gen_xor_tl(temp, result, r2);
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
- tcg_gen_xor_tl(temp, r1, r2);
- tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(cpu_PSW_V, result, r1);
+ tcg_gen_xor_i32(temp, result, r2);
+ tcg_gen_movcond_i32(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
+ tcg_gen_xor_i32(temp, r1, r2);
+ tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, result, result);
- tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, result, result);
+ tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* write back result */
- tcg_gen_mov_tl(ret, result);
+ tcg_gen_mov_i32(ret, result);
}
static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
@@ -2168,19 +2168,19 @@ static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
TCGv high = tcg_temp_new();
TCGv low = tcg_temp_new();
- tcg_gen_muls2_tl(low, high, r1, r2);
- tcg_gen_mov_tl(ret, low);
+ tcg_gen_muls2_i32(low, high, r1, r2);
+ tcg_gen_mov_i32(ret, low);
/* calc V bit */
- tcg_gen_sari_tl(low, low, 31);
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_sari_i32(low, low, 31);
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_PSW_V, high, low);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
@@ -2191,16 +2191,16 @@ static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
- tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
+ tcg_gen_muls2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
@@ -2212,16 +2212,16 @@ static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
- tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
+ tcg_gen_mulu2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
- tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
@@ -2264,7 +2264,7 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
if (n == 0) {
if (up_shift == 32) {
- tcg_gen_muls2_tl(rh, rl, arg1, arg2);
+ tcg_gen_muls2_i32(rh, rl, arg1, arg2);
} else if (up_shift == 16) {
tcg_gen_ext_i32_i64(temp_64, arg1);
tcg_gen_ext_i32_i64(temp2_64, arg2);
@@ -2273,10 +2273,10 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
tcg_gen_shri_i64(temp_64, temp_64, up_shift);
tcg_gen_extr_i64_i32(rl, rh, temp_64);
} else {
- tcg_gen_muls2_tl(rl, rh, arg1, arg2);
+ tcg_gen_muls2_i32(rl, rh, arg1, arg2);
}
/* reset v bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
} else { /* n is expected to be 1 */
tcg_gen_ext_i32_i64(temp_64, arg1);
tcg_gen_ext_i32_i64(temp2_64, arg2);
@@ -2291,26 +2291,26 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
tcg_gen_extr_i64_i32(rl, rh, temp_64);
/* overflow only occurs if r1 = r2 = 0x8000 */
if (up_shift == 0) {/* result is 64 bit */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, rh,
0x80000000);
} else { /* result is 32 bit */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, rl,
0x80000000);
}
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc sv overflow bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
}
/* calc av overflow bit */
if (up_shift == 0) {
- tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
- tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV);
} else {
- tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
- tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, rl, rl);
+ tcg_gen_xor_i32(cpu_PSW_AV, rl, cpu_PSW_AV);
}
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static void
@@ -2318,47 +2318,47 @@ gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
TCGv temp = tcg_temp_new();
if (n == 0) {
- tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_mul_i32(ret, arg1, arg2);
} else { /* n is expected to be 1 */
- tcg_gen_mul_tl(ret, arg1, arg2);
- tcg_gen_shli_tl(ret, ret, 1);
+ tcg_gen_mul_i32(ret, arg1, arg2);
+ tcg_gen_shli_i32(ret, ret, 1);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
- tcg_gen_sub_tl(ret, ret, temp);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, ret, 0x80000000);
+ tcg_gen_sub_i32(ret, ret, temp);
}
/* reset v bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc av overflow bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
TCGv temp = tcg_temp_new();
if (n == 0) {
- tcg_gen_mul_tl(ret, arg1, arg2);
- tcg_gen_addi_tl(ret, ret, 0x8000);
+ tcg_gen_mul_i32(ret, arg1, arg2);
+ tcg_gen_addi_i32(ret, ret, 0x8000);
} else {
- tcg_gen_mul_tl(ret, arg1, arg2);
- tcg_gen_shli_tl(ret, ret, 1);
- tcg_gen_addi_tl(ret, ret, 0x8000);
+ tcg_gen_mul_i32(ret, arg1, arg2);
+ tcg_gen_shli_i32(ret, ret, 1);
+ tcg_gen_addi_i32(ret, ret, 0x8000);
/* catch special case r1 = r2 = 0x8000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
- tcg_gen_muli_tl(temp, temp, 0x8001);
- tcg_gen_sub_tl(ret, ret, temp);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, ret, 0x80008000);
+ tcg_gen_muli_i32(temp, temp, 0x8001);
+ tcg_gen_sub_i32(ret, ret, temp);
}
/* reset v bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc av overflow bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* cut halfword off */
- tcg_gen_andi_tl(ret, ret, 0xffff0000);
+ tcg_gen_andi_i32(ret, ret, 0xffff0000);
}
static inline void
@@ -2447,23 +2447,23 @@ gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
- tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low));
- tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up));
+ tcg_gen_smax_i32(ret, arg, tcg_constant_i32(low));
+ tcg_gen_smin_i32(ret, ret, tcg_constant_i32(up));
}
static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
- tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up));
+ tcg_gen_umin_i32(ret, arg, tcg_constant_i32(up));
}
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
if (shift_count == -32) {
- tcg_gen_movi_tl(ret, 0);
+ tcg_gen_movi_i32(ret, 0);
} else if (shift_count >= 0) {
- tcg_gen_shli_tl(ret, r1, shift_count);
+ tcg_gen_shli_i32(ret, r1, shift_count);
} else {
- tcg_gen_shri_tl(ret, r1, -shift_count);
+ tcg_gen_shri_i32(ret, r1, -shift_count);
}
}
@@ -2472,16 +2472,16 @@ static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
TCGv temp_low, temp_high;
if (shiftcount == -16) {
- tcg_gen_movi_tl(ret, 0);
+ tcg_gen_movi_i32(ret, 0);
} else {
temp_high = tcg_temp_new();
temp_low = tcg_temp_new();
- tcg_gen_andi_tl(temp_low, r1, 0xffff);
- tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
+ tcg_gen_andi_i32(temp_low, r1, 0xffff);
+ tcg_gen_andi_i32(temp_high, r1, 0xffff0000);
gen_shi(temp_low, temp_low, shiftcount);
gen_shi(ret, temp_high, shiftcount);
- tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
+ tcg_gen_deposit_i32(ret, ret, temp_low, 0, 16);
}
}
@@ -2493,16 +2493,16 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
if (shift_count == 0) {
/* Clear PSW.C and PSW.V */
- tcg_gen_movi_tl(cpu_PSW_C, 0);
- tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
- tcg_gen_mov_tl(ret, r1);
+ tcg_gen_movi_i32(cpu_PSW_C, 0);
+ tcg_gen_mov_i32(cpu_PSW_V, cpu_PSW_C);
+ tcg_gen_mov_i32(ret, r1);
} else if (shift_count == -32) {
/* set PSW.C */
- tcg_gen_mov_tl(cpu_PSW_C, r1);
+ tcg_gen_mov_i32(cpu_PSW_C, r1);
/* fill ret completely with sign bit */
- tcg_gen_sari_tl(ret, r1, 31);
+ tcg_gen_sari_i32(ret, r1, 31);
/* clear PSW.V */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
} else if (shift_count > 0) {
TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);
@@ -2510,30 +2510,30 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
/* calc carry */
msk_start = 32 - shift_count;
msk = ((1 << shift_count) - 1) << msk_start;
- tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ tcg_gen_andi_i32(cpu_PSW_C, r1, msk);
/* calc v/sv bits */
- tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
- tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
- tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcond_i32(TCG_COND_GT, temp, r1, t_max);
+ tcg_gen_setcond_i32(TCG_COND_LT, temp2, r1, t_min);
+ tcg_gen_or_i32(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
/* do shift */
- tcg_gen_shli_tl(ret, r1, shift_count);
+ tcg_gen_shli_i32(ret, r1, shift_count);
} else {
/* clear PSW.V */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* calc carry */
msk = (1 << -shift_count) - 1;
- tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ tcg_gen_andi_i32(cpu_PSW_C, r1, msk);
/* do shift */
- tcg_gen_sari_tl(ret, r1, -shift_count);
+ tcg_gen_sari_i32(ret, r1, -shift_count);
}
/* calc av overflow bit */
- tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
- tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV);
/* calc sav overflow bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
@@ -2552,23 +2552,23 @@ static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
TCGv low, high;
if (shift_count == 0) {
- tcg_gen_mov_tl(ret, r1);
+ tcg_gen_mov_i32(ret, r1);
} else if (shift_count > 0) {
low = tcg_temp_new();
high = tcg_temp_new();
- tcg_gen_andi_tl(high, r1, 0xffff0000);
- tcg_gen_shli_tl(low, r1, shift_count);
- tcg_gen_shli_tl(ret, high, shift_count);
- tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+ tcg_gen_andi_i32(high, r1, 0xffff0000);
+ tcg_gen_shli_i32(low, r1, shift_count);
+ tcg_gen_shli_i32(ret, high, shift_count);
+ tcg_gen_deposit_i32(ret, ret, low, 0, 16);
} else {
low = tcg_temp_new();
high = tcg_temp_new();
- tcg_gen_ext16s_tl(low, r1);
- tcg_gen_sari_tl(low, low, -shift_count);
- tcg_gen_sari_tl(ret, r1, -shift_count);
- tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+ tcg_gen_ext16s_i32(low, r1);
+ tcg_gen_sari_i32(low, low, -shift_count);
+ tcg_gen_sari_i32(ret, r1, -shift_count);
+ tcg_gen_deposit_i32(ret, ret, low, 0, 16);
}
}
@@ -2578,9 +2578,9 @@ static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
- tcg_gen_shli_tl(temp, ret, 1);
- tcg_gen_setcond_tl(cond, temp2, r1, r2);
- tcg_gen_or_tl(ret, temp, temp2);
+ tcg_gen_shli_i32(temp, ret, 1);
+ tcg_gen_setcond_i32(cond, temp2, r1, r2);
+ tcg_gen_or_i32(ret, temp, temp2);
}
static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
@@ -2626,13 +2626,13 @@ static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
temp1 = tcg_temp_new();
temp2 = tcg_temp_new();
- tcg_gen_shri_tl(temp2, r2, pos2);
- tcg_gen_shri_tl(temp1, r1, pos1);
+ tcg_gen_shri_i32(temp2, r2, pos2);
+ tcg_gen_shri_i32(temp1, r1, pos1);
(*op1)(temp1, temp1, temp2);
(*op2)(temp1 , ret, temp1);
- tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
+ tcg_gen_deposit_i32(ret, ret, temp1, 0, 1);
}
/* ret = r1[pos1] op1 r2[pos2]; */
@@ -2645,12 +2645,12 @@ static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
temp1 = tcg_temp_new();
temp2 = tcg_temp_new();
- tcg_gen_shri_tl(temp2, r2, pos2);
- tcg_gen_shri_tl(temp1, r1, pos1);
+ tcg_gen_shri_i32(temp2, r2, pos2);
+ tcg_gen_shri_i32(temp1, r1, pos1);
(*op1)(ret, temp1, temp2);
- tcg_gen_andi_tl(ret, ret, 0x1);
+ tcg_gen_andi_i32(ret, ret, 0x1);
}
static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
@@ -2659,13 +2659,13 @@ static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
/* temp = (arg1 cond arg2 )*/
- tcg_gen_setcond_tl(cond, temp, r1, r2);
+ tcg_gen_setcond_i32(cond, temp, r1, r2);
/* temp2 = ret[0]*/
- tcg_gen_andi_tl(temp2, ret, 0x1);
+ tcg_gen_andi_i32(temp2, ret, 0x1);
/* temp = temp insn temp2 */
(*op)(temp, temp, temp2);
/* ret = {ret[31:1], temp} */
- tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
+ tcg_gen_deposit_i32(ret, ret, temp, 0, 1);
}
static inline void
@@ -2684,25 +2684,25 @@ static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
TCGv b3 = tcg_temp_new();
/* byte 0 */
- tcg_gen_andi_tl(b0, r1, 0xff);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
+ tcg_gen_andi_i32(b0, r1, 0xff);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b0, b0, con & 0xff);
/* byte 1 */
- tcg_gen_andi_tl(b1, r1, 0xff00);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
+ tcg_gen_andi_i32(b1, r1, 0xff00);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b1, b1, con & 0xff00);
/* byte 2 */
- tcg_gen_andi_tl(b2, r1, 0xff0000);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
+ tcg_gen_andi_i32(b2, r1, 0xff0000);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b2, b2, con & 0xff0000);
/* byte 3 */
- tcg_gen_andi_tl(b3, r1, 0xff000000);
- tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
+ tcg_gen_andi_i32(b3, r1, 0xff000000);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, b3, b3, con & 0xff000000);
/* combine them */
- tcg_gen_or_tl(ret, b0, b1);
- tcg_gen_or_tl(ret, ret, b2);
- tcg_gen_or_tl(ret, ret, b3);
+ tcg_gen_or_i32(ret, b0, b1);
+ tcg_gen_or_i32(ret, ret, b2);
+ tcg_gen_or_i32(ret, ret, b3);
}
static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
@@ -2711,15 +2711,15 @@ static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
TCGv h1 = tcg_temp_new();
/* halfword 0 */
- tcg_gen_andi_tl(h0, r1, 0xffff);
- tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
+ tcg_gen_andi_i32(h0, r1, 0xffff);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, h0, h0, con & 0xffff);
/* halfword 1 */
- tcg_gen_andi_tl(h1, r1, 0xffff0000);
- tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
+ tcg_gen_andi_i32(h1, r1, 0xffff0000);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, h1, h1, con & 0xffff0000);
/* combine them */
- tcg_gen_or_tl(ret, h0, h1);
+ tcg_gen_or_i32(ret, h0, h1);
}
/* mask = ((1 << width) -1) << pos;
@@ -2730,14 +2730,14 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
- tcg_gen_shl_tl(mask, tcg_constant_tl(1), width);
- tcg_gen_subi_tl(mask, mask, 1);
- tcg_gen_shl_tl(mask, mask, pos);
+ tcg_gen_shl_i32(mask, tcg_constant_i32(1), width);
+ tcg_gen_subi_i32(mask, mask, 1);
+ tcg_gen_shl_i32(mask, mask, pos);
- tcg_gen_shl_tl(temp, r2, pos);
- tcg_gen_and_tl(temp, temp, mask);
- tcg_gen_andc_tl(temp2, r1, mask);
- tcg_gen_or_tl(ret, temp, temp2);
+ tcg_gen_shl_i32(temp, r2, pos);
+ tcg_gen_and_i32(temp, temp, mask);
+ tcg_gen_andc_i32(temp2, r1, mask);
+ tcg_gen_or_i32(ret, temp, temp2);
}
static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
@@ -2786,35 +2786,35 @@ static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
{
TCGv temp = tcg_temp_new();
/* calc AV bit */
- tcg_gen_add_tl(temp, arg_low, arg_low);
- tcg_gen_xor_tl(temp, temp, arg_low);
- tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
- tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ tcg_gen_add_i32(temp, arg_low, arg_low);
+ tcg_gen_xor_i32(temp, temp, arg_low);
+ tcg_gen_add_i32(cpu_PSW_AV, arg_high, arg_high);
+ tcg_gen_xor_i32(cpu_PSW_AV, cpu_PSW_AV, arg_high);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
}
static void gen_calc_usb_mulr_h(TCGv arg)
{
TCGv temp = tcg_temp_new();
/* calc AV bit */
- tcg_gen_add_tl(temp, arg, arg);
- tcg_gen_xor_tl(temp, temp, arg);
- tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
- tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ tcg_gen_add_i32(temp, arg, arg);
+ tcg_gen_xor_i32(temp, temp, arg);
+ tcg_gen_shli_i32(cpu_PSW_AV, temp, 16);
+ tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp);
/* calc SAV bit */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
/* clear V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
}
/* helpers for generating program flow micro-ops */
static inline void gen_save_pc(vaddr pc)
{
- tcg_gen_movi_tl(cpu_PC, pc);
+ tcg_gen_movi_i32(cpu_PC, pc);
}
static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_index, vaddr dest)
@@ -2844,7 +2844,7 @@ static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
TCGv r2, int16_t address)
{
TCGLabel *jumpLabel = gen_new_label();
- tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);
+ tcg_gen_brcond_i32(cond, r1, r2, jumpLabel);
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
@@ -2863,8 +2863,8 @@ static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
{
TCGLabel *l1 = gen_new_label();
- tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
+ tcg_gen_subi_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
gen_goto_tb(ctx, 1, ctx->base.pc_next + offset);
gen_set_label(l1);
gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
@@ -2874,20 +2874,20 @@ static void gen_fcall_save_ctx(DisasContext *ctx)
{
TCGv temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4);
- tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
- tcg_gen_mov_tl(cpu_gpr_a[10], temp);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[10], -4);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_mov_i32(cpu_gpr_a[10], temp);
}
static void gen_fret(DisasContext *ctx)
{
TCGv temp = tcg_temp_new();
- tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4);
- tcg_gen_mov_tl(cpu_PC, temp);
+ tcg_gen_andi_i32(temp, cpu_gpr_a[11], ~0x1);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_i32(cpu_gpr_a[10], cpu_gpr_a[10], 4);
+ tcg_gen_mov_i32(cpu_PC, temp);
ctx->base.is_jmp = DISAS_EXIT;
}
@@ -2932,12 +2932,12 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
/* SBRN-format jumps */
case OPC1_16_SBRN_JZ_T:
temp = tcg_temp_new();
- tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant);
gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
break;
case OPC1_16_SBRN_JNZ_T:
temp = tcg_temp_new();
- tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant);
gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
break;
/* SBR-format jumps */
@@ -2986,7 +2986,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
break;
/* SR-format jumps */
case OPC1_16_SR_JI:
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
ctx->base.is_jmp = DISAS_EXIT;
break;
case OPC2_32_SYS_RET:
@@ -3008,13 +3008,13 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
break;
case OPC1_32_B_JLA:
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
/* fall through */
case OPC1_32_B_JA:
gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
break;
case OPC1_32_B_JL:
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2);
break;
/* BOL format */
@@ -3046,14 +3046,14 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
case OPCM_32_BRC_JNE:
temp = tcg_temp_new();
if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* subi is unconditional */
- tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_subi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
} else {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* addi is unconditional */
- tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_addi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
}
break;
@@ -3062,7 +3062,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
n = MASK_OP_BRN_N(ctx->opcode);
temp = tcg_temp_new();
- tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n));
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r1], (1 << n));
if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
@@ -3119,18 +3119,18 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
temp = tcg_temp_new();
temp2 = tcg_temp_new();
if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* also save r2, in case of r1 == r2, so r2 is not decremented */
- tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(temp2, cpu_gpr_d[r2]);
/* subi is unconditional */
- tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_subi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
} else {
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* also save r2, in case of r1 == r2, so r2 is not decremented */
- tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(temp2, cpu_gpr_d[r2]);
/* addi is unconditional */
- tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ tcg_gen_addi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
}
break;
@@ -3171,7 +3171,7 @@ static void decode_src_opc(DisasContext *ctx, int op1)
gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
break;
case OPC1_16_SRC_ADD_A:
- tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
+ tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
break;
case OPC1_16_SRC_CADD:
gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
@@ -3182,37 +3182,37 @@ static void decode_src_opc(DisasContext *ctx, int op1)
cpu_gpr_d[15]);
break;
case OPC1_16_SRC_CMOV:
- temp = tcg_constant_tl(0);
- temp2 = tcg_constant_tl(const4);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const4);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
case OPC1_16_SRC_CMOVN:
- temp = tcg_constant_tl(0);
- temp2 = tcg_constant_tl(const4);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const4);
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
case OPC1_16_SRC_EQ:
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
const4);
break;
case OPC1_16_SRC_LT:
- tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcondi_i32(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
const4);
break;
case OPC1_16_SRC_MOV:
- tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
+ tcg_gen_movi_i32(cpu_gpr_d[r1], const4);
break;
case OPC1_16_SRC_MOV_A:
const4 = MASK_OP_SRC_CONST4(ctx->opcode);
- tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
+ tcg_gen_movi_i32(cpu_gpr_a[r1], const4);
break;
case OPC1_16_SRC_MOV_E:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r1);
- tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
- tcg_gen_sari_tl(cpu_gpr_d[r1+1], cpu_gpr_d[r1], 31);
+ tcg_gen_movi_i32(cpu_gpr_d[r1], const4);
+ tcg_gen_sari_i32(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], 31);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -3247,49 +3247,49 @@ static void decode_srr_opc(DisasContext *ctx, int op1)
gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_ADD_A:
- tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_add_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
case OPC1_16_SRR_ADDS:
gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_AND:
- tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_and_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_CMOV:
- temp = tcg_constant_tl(0);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
case OPC1_16_SRR_CMOVN:
- temp = tcg_constant_tl(0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp = tcg_constant_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
case OPC1_16_SRR_EQ:
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_LT:
- tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_MOV:
- tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_MOV_A:
- tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_MOV_AA:
- tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
case OPC1_16_SRR_MOV_D:
- tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r1], cpu_gpr_a[r2]);
break;
case OPC1_16_SRR_MUL:
gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_OR:
- tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_or_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_SUB:
gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
@@ -3304,7 +3304,7 @@ static void decode_srr_opc(DisasContext *ctx, int op1)
gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_XOR:
- tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_xor_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3320,32 +3320,32 @@ static void decode_ssr_opc(DisasContext *ctx, int op1)
switch (op1) {
case OPC1_16_SSR_ST_A:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
break;
case OPC1_16_SSR_ST_A_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
case OPC1_16_SSR_ST_B:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
break;
case OPC1_16_SSR_ST_B_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
break;
case OPC1_16_SSR_ST_H:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
break;
case OPC1_16_SSR_ST_H_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
break;
case OPC1_16_SSR_ST_W:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
break;
case OPC1_16_SSR_ST_W_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3360,7 +3360,7 @@ static void decode_sc_opc(DisasContext *ctx, int op1)
switch (op1) {
case OPC1_16_SC_AND:
- tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ tcg_gen_andi_i32(cpu_gpr_d[15], cpu_gpr_d[15], const16);
break;
case OPC1_16_SC_BISR:
if (ctx->priv == TRICORE_PRIV_SM) {
@@ -3376,10 +3376,10 @@ static void decode_sc_opc(DisasContext *ctx, int op1)
gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
break;
case OPC1_16_SC_MOV:
- tcg_gen_movi_tl(cpu_gpr_d[15], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[15], const16);
break;
case OPC1_16_SC_OR:
- tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ tcg_gen_ori_i32(cpu_gpr_d[15], cpu_gpr_d[15], const16);
break;
case OPC1_16_SC_ST_A:
gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
@@ -3388,7 +3388,7 @@ static void decode_sc_opc(DisasContext *ctx, int op1)
gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
break;
case OPC1_16_SC_SUB_A:
- tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
+ tcg_gen_subi_i32(cpu_gpr_a[10], cpu_gpr_a[10], const16);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3405,32 +3405,32 @@ static void decode_slr_opc(DisasContext *ctx, int op1)
switch (op1) {
/* SLR-format */
case OPC1_16_SLR_LD_A:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
break;
case OPC1_16_SLR_LD_A_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
case OPC1_16_SLR_LD_BU:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
break;
case OPC1_16_SLR_LD_BU_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
break;
case OPC1_16_SLR_LD_H:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
break;
case OPC1_16_SLR_LD_H_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
break;
case OPC1_16_SLR_LD_W:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
break;
case OPC1_16_SLR_LD_W_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3513,17 +3513,18 @@ static void decode_sr_accu(DisasContext *ctx)
switch (op2) {
case OPC2_16_SR_RSUB:
/* calc V bit -- overflow only if r1 = -0x80000000 */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V,
+ cpu_gpr_d[r1], -0x80000000);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* sub */
- tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_neg_i32(cpu_gpr_d[r1], cpu_gpr_d[r1]);
/* calc av */
- tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
- tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
+ tcg_gen_add_i32(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_xor_i32(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
/* calc sav */
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
break;
case OPC2_16_SR_SAT_B:
gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
@@ -3616,8 +3617,8 @@ static void decode_16Bit_opc(DisasContext *ctx)
r1 = MASK_OP_SRRS_S1D(ctx->opcode);
const16 = MASK_OP_SRRS_N(ctx->opcode);
temp = tcg_temp_new();
- tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
- tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
+ tcg_gen_shli_i32(temp, cpu_gpr_d[15], const16);
+ tcg_gen_add_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
break;
/* SLRO-format */
case OPC1_16_SLRO_LD_A:
@@ -3766,7 +3767,7 @@ static void decode_16Bit_opc(DisasContext *ctx)
break;
case OPC1_16_SR_NOT:
r1 = MASK_OP_SR_S1D(ctx->opcode);
- tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_not_i32(cpu_gpr_d[r1], cpu_gpr_d[r1]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3793,18 +3794,18 @@ static void decode_abs_ldw(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_LD_A:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
break;
case OPC2_32_ABS_LD_D:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
break;
case OPC2_32_ABS_LD_DA:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
break;
case OPC2_32_ABS_LD_W:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3826,16 +3827,16 @@ static void decode_abs_ldb(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_LD_B:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
break;
case OPC2_32_ABS_LD_BU:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
break;
case OPC2_32_ABS_LD_H:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
break;
case OPC2_32_ABS_LD_HU:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3908,18 +3909,18 @@ static void decode_abs_store(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_ST_A:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
break;
case OPC2_32_ABS_ST_D:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
break;
case OPC2_32_ABS_ST_DA:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
break;
case OPC2_32_ABS_ST_W:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -3941,10 +3942,10 @@ static void decode_abs_storeb_h(DisasContext *ctx)
switch (op2) {
case OPC2_32_ABS_ST_B:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
break;
case OPC2_32_ABS_ST_H:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4043,11 +4044,11 @@ static void decode_bit_insert(DisasContext *ctx)
temp = tcg_temp_new();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r2], pos2);
if (op2 == OPC2_32_BIT_INSN_T) {
- tcg_gen_not_tl(temp, temp);
+ tcg_gen_not_i32(temp, temp);
}
- tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
+ tcg_gen_deposit_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
}
static void decode_bit_logical_t2(DisasContext *ctx)
@@ -4163,8 +4164,8 @@ static void decode_bit_sh_logic1(DisasContext *ctx)
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
- tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
}
static void decode_bit_sh_logic2(DisasContext *ctx)
@@ -4203,8 +4204,8 @@ static void decode_bit_sh_logic2(DisasContext *ctx)
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
- tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
}
/* BO-format */
@@ -4233,14 +4234,14 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
case OPC2_32_BO_CACHEA_I_POSTINC:
/* instruction to access the cache, but we still need to handle
the addressing mode */
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_CACHEA_WI_PREINC:
case OPC2_32_BO_CACHEA_W_PREINC:
case OPC2_32_BO_CACHEA_I_PREINC:
/* instruction to access the cache, but we still need to handle
the addressing mode */
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_CACHEI_WI_SHORTOFF:
case OPC2_32_BO_CACHEI_W_SHORTOFF:
@@ -4251,7 +4252,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
case OPC2_32_BO_CACHEI_W_POSTINC:
case OPC2_32_BO_CACHEI_WI_POSTINC:
if (has_feature(ctx, TRICORE_FEATURE_131)) {
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -4259,7 +4260,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
case OPC2_32_BO_CACHEI_W_PREINC:
case OPC2_32_BO_CACHEI_WI_PREINC:
if (has_feature(ctx, TRICORE_FEATURE_131)) {
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -4268,9 +4269,9 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
break;
case OPC2_32_BO_ST_A_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LESL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_A_PREINC:
gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
@@ -4279,82 +4280,82 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_ST_B_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_B_PREINC:
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_ST_D_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ gen_offset_st_2regs(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
off10, ctx);
break;
case OPC2_32_BO_ST_D_POSTINC:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_D_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_ST_DA_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ gen_offset_st_2regs(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
off10, ctx);
break;
case OPC2_32_BO_ST_DA_POSTINC:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_DA_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_ST_H_SHORTOFF:
gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_H_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_H_PREINC:
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_Q_SHORTOFF:
temp = tcg_temp_new();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_Q_POSTINC:
temp = tcg_temp_new();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp, cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_Q_PREINC:
temp = tcg_temp_new();
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_W_SHORTOFF:
gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
break;
case OPC2_32_BO_ST_W_POSTINC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_W_PREINC:
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
@@ -4380,91 +4381,91 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx)
temp2 = tcg_temp_new();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
- tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
switch (op2) {
case OPC2_32_BO_CACHEA_WI_BR:
case OPC2_32_BO_CACHEA_W_BR:
case OPC2_32_BO_CACHEA_I_BR:
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_CACHEA_WI_CIRC:
case OPC2_32_BO_CACHEA_W_CIRC:
case OPC2_32_BO_CACHEA_I_CIRC:
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_A_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_A_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_B_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_B_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_D_BR:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_D_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_DA_BR:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_DA_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_i32(cpu_gpr_a[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_H_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_H_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_Q_BR:
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_Q_CIRC:
- tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_ST_W_BR:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_W_CIRC:
- tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4488,9 +4489,9 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
break;
case OPC2_32_BO_LD_A_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_A_PREINC:
gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
@@ -4499,9 +4500,9 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
break;
case OPC2_32_BO_LD_B_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_SB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_B_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
@@ -4510,54 +4511,54 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_LD_BU_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_UB);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_BU_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
break;
case OPC2_32_BO_LD_D_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ gen_offset_ld_2regs(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
off10, ctx);
break;
case OPC2_32_BO_LD_D_POSTINC:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_D_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_LD_DA_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ gen_offset_ld_2regs(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
off10, ctx);
break;
case OPC2_32_BO_LD_DA_POSTINC:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_DA_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
- tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
+ tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_LD_H_SHORTOFF:
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
break;
case OPC2_32_BO_LD_H_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LESW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_H_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
@@ -4566,34 +4567,34 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_LD_HU_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_HU_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_LD_Q_SHORTOFF:
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
break;
case OPC2_32_BO_LD_Q_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_Q_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
break;
case OPC2_32_BO_LD_W_SHORTOFF:
gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
break;
case OPC2_32_BO_LD_W_POSTINC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUL);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_W_PREINC:
gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
@@ -4619,98 +4620,98 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx)
temp2 = tcg_temp_new();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
- tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
switch (op2) {
case OPC2_32_BO_LD_A_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_A_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_B_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_B_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_BU_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_BU_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_D_BR:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_D_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_DA_BR:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_DA_CIRC:
CHECK_REG_PAIR(r1);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
- tcg_gen_addi_tl(temp, temp, 4);
- tcg_gen_rem_tl(temp, temp, temp2);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16);
+ tcg_gen_addi_i32(temp, temp, 4);
+ tcg_gen_rem_i32(temp, temp, temp2);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1 + 1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_H_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_H_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_HU_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_HU_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_Q_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_Q_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_LD_W_BR:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_W_CIRC:
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4735,70 +4736,70 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx)
switch (op2) {
case OPC2_32_BO_LDLCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_ldlcx(tcg_env, temp);
break;
case OPC2_32_BO_LDMST_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_ldmst(ctx, r1, temp);
break;
case OPC2_32_BO_LDMST_POSTINC:
gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LDMST_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
break;
case OPC2_32_BO_LDUCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_lducx(tcg_env, temp);
break;
case OPC2_32_BO_LEA_SHORTOFF:
- tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_STLCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_stlcx(tcg_env, temp);
break;
case OPC2_32_BO_STUCX_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_helper_stucx(tcg_env, temp);
break;
case OPC2_32_BO_SWAP_W_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_swap(ctx, r1, temp);
break;
case OPC2_32_BO_SWAP_W_POSTINC:
gen_swap(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_SWAP_W_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_swap(ctx, r1, cpu_gpr_a[r2]);
break;
case OPC2_32_BO_CMPSWAP_W_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_cmpswap(ctx, r1, temp);
break;
case OPC2_32_BO_CMPSWAP_W_POSTINC:
gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_CMPSWAP_W_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
break;
case OPC2_32_BO_SWAPMSK_W_SHORTOFF:
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_swapmsk(ctx, r1, temp);
break;
case OPC2_32_BO_SWAPMSK_W_POSTINC:
gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_SWAPMSK_W_PREINC:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
break;
default:
@@ -4822,41 +4823,41 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx)
temp2 = tcg_temp_new();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
- tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
- tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
+ tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp);
switch (op2) {
case OPC2_32_BO_LDMST_BR:
gen_ldmst(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LDMST_CIRC:
gen_ldmst(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_SWAP_W_BR:
gen_swap(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_SWAP_W_CIRC:
gen_swap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_CMPSWAP_W_BR:
gen_cmpswap(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_CMPSWAP_W_CIRC:
gen_cmpswap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
case OPC2_32_BO_SWAPMSK_W_BR:
gen_swapmsk(ctx, r1, temp2);
- gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_SWAPMSK_W_CIRC:
gen_swapmsk(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
+ gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4876,16 +4877,16 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1)
switch (op1) {
case OPC1_32_BOL_LD_A_LONGOFF:
temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
- tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
break;
case OPC1_32_BOL_LD_W_LONGOFF:
temp = tcg_temp_new();
- tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
break;
case OPC1_32_BOL_LEA_LONGOFF:
- tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
+ tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
break;
case OPC1_32_BOL_ST_A_LONGOFF:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
@@ -4959,26 +4960,26 @@ static void decode_rc_logical_shift(DisasContext *ctx)
switch (op2) {
case OPC2_32_RC_AND:
- tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_andi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_ANDN:
- tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ tcg_gen_andi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
break;
case OPC2_32_RC_NAND:
temp = tcg_temp_new();
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_nand_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_NOR:
temp = tcg_temp_new();
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_nor_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_OR:
- tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_ori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_ORN:
- tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ tcg_gen_ori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
break;
case OPC2_32_RC_SH:
const9 = sextract32(const9, 0, 6);
@@ -5000,11 +5001,11 @@ static void decode_rc_logical_shift(DisasContext *ctx)
gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_XNOR:
- tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
- tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
+ tcg_gen_xori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_not_i32(cpu_gpr_d[r2], cpu_gpr_d[r2]);
break;
case OPC2_32_RC_XOR:
- tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_xori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_SHUFFLE:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
@@ -5084,7 +5085,7 @@ static void decode_rc_accumulator(DisasContext *ctx)
const9, &tcg_gen_and_tl);
break;
case OPC2_32_RC_EQ:
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_EQANY_B:
gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
@@ -5093,41 +5094,41 @@ static void decode_rc_accumulator(DisasContext *ctx)
gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_GE:
- tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_GE_U:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
- tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_LT:
- tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_LT_U:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
- tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_MAX:
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_movcond_i32(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_MAX_U:
- tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
- tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_i32(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_MIN:
- tcg_gen_movi_tl(temp, const9);
- tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, const9);
+ tcg_gen_movcond_i32(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_MIN_U:
- tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
- tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ tcg_gen_movi_i32(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_i32(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_NE:
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_OR_EQ:
gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
@@ -5156,15 +5157,15 @@ static void decode_rc_accumulator(DisasContext *ctx)
const9, &tcg_gen_or_tl);
break;
case OPC2_32_RC_RSUB:
- tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movi_i32(temp, const9);
gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
break;
case OPC2_32_RC_RSUBS:
- tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movi_i32(temp, const9);
gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
break;
case OPC2_32_RC_RSUBS_U:
- tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movi_i32(temp, const9);
gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
break;
case OPC2_32_RC_SH_EQ:
@@ -5260,7 +5261,7 @@ static void decode_rc_mul(DisasContext *ctx)
break;
case OPC2_32_RC_MUL_64:
CHECK_REG_PAIR(r2);
- gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2 + 1], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_MULS_32:
gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
@@ -5268,7 +5269,7 @@ static void decode_rc_mul(DisasContext *ctx)
case OPC2_32_RC_MUL_U_64:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
CHECK_REG_PAIR(r2);
- gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2 + 1], cpu_gpr_d[r1], const9);
break;
case OPC2_32_RC_MULS_U_32:
const9 = MASK_OP_RC_CONST9(ctx->opcode);
@@ -5300,18 +5301,18 @@ static void decode_rcpw_insert(DisasContext *ctx)
CHECK_REG_PAIR(r2);
/* if pos + width > 32 undefined result */
if (pos + width <= 32) {
- tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
- tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
+ tcg_gen_movi_i32(cpu_gpr_d[r2 + 1], ((1u << width) - 1) << pos);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], (const4 << pos));
}
break;
case OPC2_32_RCPW_INSERT:
- /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
+ /* tcg_gen_deposit_i32() does not handle the case of width = 0 */
if (width == 0) {
- tcg_gen_mov_tl(cpu_gpr_d[r2], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r2], cpu_gpr_d[r1]);
/* if pos + width > 32 undefined result */
} else if (pos + width <= 32) {
temp = tcg_constant_i32(const4);
- tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
+ tcg_gen_deposit_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
}
break;
default:
@@ -5342,18 +5343,18 @@ static void decode_rcrw_insert(DisasContext *ctx)
switch (op2) {
case OPC2_32_RCRW_IMASK:
CHECK_REG_PAIR(r4);
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_movi_tl(temp2, (1 << width) - 1);
- tcg_gen_shl_tl(cpu_gpr_d[r4 + 1], temp2, temp);
- tcg_gen_movi_tl(temp2, const4);
- tcg_gen_shl_tl(cpu_gpr_d[r4], temp2, temp);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp2, (1 << width) - 1);
+ tcg_gen_shl_i32(cpu_gpr_d[r4 + 1], temp2, temp);
+ tcg_gen_movi_i32(temp2, const4);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], temp2, temp);
break;
case OPC2_32_RCRW_INSERT:
temp3 = tcg_temp_new();
- tcg_gen_movi_tl(temp, width);
- tcg_gen_movi_tl(temp2, const4);
- tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp, width);
+ tcg_gen_movi_i32(temp2, const4);
+ tcg_gen_andi_i32(temp3, cpu_gpr_d[r3], 0x1f);
gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], temp2, temp, temp3);
break;
default:
@@ -5389,13 +5390,13 @@ static void decode_rcr_cond_select(DisasContext *ctx)
case OPC2_32_RCR_SEL:
temp = tcg_constant_i32(0);
temp2 = tcg_constant_i32(const9);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
case OPC2_32_RCR_SELN:
temp = tcg_constant_i32(0);
temp2 = tcg_constant_i32(const9);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
default:
@@ -5423,8 +5424,8 @@ static void decode_rcr_madd(DisasContext *ctx)
case OPC2_32_RCR_MADD_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MADDS_32:
gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
@@ -5432,15 +5433,15 @@ static void decode_rcr_madd(DisasContext *ctx)
case OPC2_32_RCR_MADDS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MADD_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MADDS_U_32:
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
@@ -5450,8 +5451,8 @@ static void decode_rcr_madd(DisasContext *ctx)
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5478,8 +5479,8 @@ static void decode_rcr_msub(DisasContext *ctx)
case OPC2_32_RCR_MSUB_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MSUBS_32:
gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
@@ -5487,15 +5488,15 @@ static void decode_rcr_msub(DisasContext *ctx)
case OPC2_32_RCR_MSUBS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MSUB_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
case OPC2_32_RCR_MSUBS_U_32:
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
@@ -5505,8 +5506,8 @@ static void decode_rcr_msub(DisasContext *ctx)
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
const9 = MASK_OP_RCR_CONST9(ctx->opcode);
- gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5533,33 +5534,33 @@ static void decode_rlc_opc(DisasContext *ctx,
gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
break;
case OPC1_32_RLC_ADDIH_A:
- tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
+ tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
break;
case OPC1_32_RLC_MFCR:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
gen_mfcr(ctx, cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV:
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV_64:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r2);
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
- tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2 + 1], const16 >> 15);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
break;
case OPC1_32_RLC_MOV_U:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV_H:
- tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
+ tcg_gen_movi_i32(cpu_gpr_d[r2], const16 << 16);
break;
case OPC1_32_RLC_MOVH_A:
- tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
+ tcg_gen_movi_i32(cpu_gpr_a[r2], const16 << 16);
break;
case OPC1_32_RLC_MTCR:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
@@ -5673,7 +5674,7 @@ static void decode_rr_accumulator(DisasContext *ctx)
cpu_gpr_d[r2], &tcg_gen_and_tl);
break;
case OPC2_32_RR_EQ:
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_EQ_B:
@@ -5683,7 +5684,7 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_EQ_W:
- tcg_gen_negsetcond_tl(TCG_COND_EQ, cpu_gpr_d[r3],
+ tcg_gen_negsetcond_i32(TCG_COND_EQ, cpu_gpr_d[r3],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_EQANY_B:
@@ -5693,19 +5694,19 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_GE:
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_GE_U:
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT:
- tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_U:
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_B:
@@ -5721,19 +5722,19 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_W:
- tcg_gen_negsetcond_tl(TCG_COND_LT, cpu_gpr_d[r3],
+ tcg_gen_negsetcond_i32(TCG_COND_LT, cpu_gpr_d[r3],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_LT_WU:
- tcg_gen_negsetcond_tl(TCG_COND_LTU, cpu_gpr_d[r3],
+ tcg_gen_negsetcond_i32(TCG_COND_LTU, cpu_gpr_d[r3],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MAX:
- tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MAX_U:
- tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MAX_B:
@@ -5749,11 +5750,11 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MIN:
- tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MIN_U:
- tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_movcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MIN_B:
@@ -5769,16 +5770,16 @@ static void decode_rr_accumulator(DisasContext *ctx)
gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MOV:
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MOV_64:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
temp = tcg_temp_new();
CHECK_REG_PAIR(r3);
- tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
- tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
+ tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -5786,14 +5787,14 @@ static void decode_rr_accumulator(DisasContext *ctx)
case OPC2_32_RR_MOVS_64:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r3);
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
- tcg_gen_sari_tl(cpu_gpr_d[r3 + 1], cpu_gpr_d[r2], 31);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_sari_i32(cpu_gpr_d[r3 + 1], cpu_gpr_d[r2], 31);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
break;
case OPC2_32_RR_NE:
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_OR_EQ:
@@ -5926,41 +5927,41 @@ static void decode_rr_logical_shift(DisasContext *ctx)
switch (op2) {
case OPC2_32_RR_AND:
- tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_and_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ANDN:
- tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_andc_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_CLO:
- tcg_gen_not_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
- tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS);
+ tcg_gen_not_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clzi_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS);
break;
case OPC2_32_RR_CLO_H:
gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLS:
- tcg_gen_clrsb_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clrsb_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLS_H:
gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLZ:
- tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS);
+ tcg_gen_clzi_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS);
break;
case OPC2_32_RR_CLZ_H:
gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_NAND:
- tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_nand_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_NOR:
- tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_nor_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_OR:
- tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_or_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ORN:
- tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_orc_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SH:
gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
@@ -5978,10 +5979,10 @@ static void decode_rr_logical_shift(DisasContext *ctx)
gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_XNOR:
- tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_eqv_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_XOR:
- tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_gen_xor_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6002,52 +6003,52 @@ static void decode_rr_address(DisasContext *ctx)
switch (op2) {
case OPC2_32_RR_ADD_A:
- tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
case OPC2_32_RR_ADDSC_A:
temp = tcg_temp_new();
- tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
- tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
+ tcg_gen_shli_i32(temp, cpu_gpr_d[r1], n);
+ tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
break;
case OPC2_32_RR_ADDSC_AT:
temp = tcg_temp_new();
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
- tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
- tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 3);
+ tcg_gen_add_i32(temp, cpu_gpr_a[r2], temp);
+ tcg_gen_andi_i32(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
break;
case OPC2_32_RR_EQ_A:
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_EQZ:
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
break;
case OPC2_32_RR_GE_A:
- tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_LT_A:
- tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_MOV_A:
- tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_MOV_AA:
- tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_a[r3], cpu_gpr_a[r2]);
break;
case OPC2_32_RR_MOV_D:
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_a[r2]);
break;
case OPC2_32_RR_NE_A:
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
cpu_gpr_a[r2]);
break;
case OPC2_32_RR_NEZ_A:
- tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
break;
case OPC2_32_RR_SUB_A:
- tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ tcg_gen_sub_i32(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6064,19 +6065,19 @@ static void decode_rr_idirect(DisasContext *ctx)
switch (op2) {
case OPC2_32_RR_JI:
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
break;
case OPC2_32_RR_JLI:
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
- tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn);
break;
case OPC2_32_RR_CALLI:
gen_helper_1arg(call, ctx->pc_succ_insn);
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
break;
case OPC2_32_RR_FCALLI:
gen_fcall_save_ctx(ctx);
- tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6103,11 +6104,11 @@ static void decode_rr_divide(DisasContext *ctx)
break;
case OPC2_32_RR_BSPLIT:
CHECK_REG_PAIR(r3);
- gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_DVINIT_B:
CHECK_REG_PAIR(r3);
- gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_DVINIT_BU:
@@ -6115,28 +6116,28 @@ static void decode_rr_divide(DisasContext *ctx)
temp2 = tcg_temp_new();
temp3 = tcg_temp_new();
CHECK_REG_PAIR(r3);
- tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8);
+ tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 8);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
if (!has_feature(ctx, TRICORE_FEATURE_131)) {
- /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
- tcg_gen_abs_tl(temp, temp3);
- tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ /* overflow = (abs(D[r3 + 1]) >= abs(D[r2])) */
+ tcg_gen_abs_i32(temp, temp3);
+ tcg_gen_abs_i32(temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_PSW_V, temp, temp2);
} else {
/* overflow = (D[b] == 0) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
}
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* write result */
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
- tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp3);
break;
case OPC2_32_RR_DVINIT_H:
CHECK_REG_PAIR(r3);
- gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_DVINIT_HU:
@@ -6144,24 +6145,24 @@ static void decode_rr_divide(DisasContext *ctx)
temp2 = tcg_temp_new();
temp3 = tcg_temp_new();
CHECK_REG_PAIR(r3);
- tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16);
+ tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 16);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
if (!has_feature(ctx, TRICORE_FEATURE_131)) {
- /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
- tcg_gen_abs_tl(temp, temp3);
- tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
- tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ /* overflow = (abs(D[r3 + 1]) >= abs(D[r2])) */
+ tcg_gen_abs_i32(temp, temp3);
+ tcg_gen_abs_i32(temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_PSW_V, temp, temp2);
} else {
/* overflow = (D[b] == 0) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
}
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* write result */
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
- tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp3);
break;
case OPC2_32_RR_DVINIT:
temp = tcg_temp_new();
@@ -6169,41 +6170,41 @@ static void decode_rr_divide(DisasContext *ctx)
CHECK_REG_PAIR(r3);
/* overflow = ((D[b] == 0) ||
((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
- tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
+ tcg_gen_and_i32(temp, temp, temp2);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
+ tcg_gen_or_i32(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
/* write result */
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
/* sign extend to high reg */
- tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
+ tcg_gen_sari_i32(cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], 31);
break;
case OPC2_32_RR_DVINIT_U:
CHECK_REG_PAIR(r3);
/* overflow = (D[b] == 0) */
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
- tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31);
/* sv */
- tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* reset av */
- tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ tcg_gen_movi_i32(cpu_PSW_AV, 0);
/* write result */
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
/* zero extend to high reg*/
- tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r3 + 1], 0);
break;
case OPC2_32_RR_PARITY:
gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_UNPACK:
CHECK_REG_PAIR(r3);
- gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CRC32_B:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
@@ -6229,7 +6230,7 @@ static void decode_rr_divide(DisasContext *ctx)
case OPC2_32_RR_POPCNT_W:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
- tcg_gen_ctpop_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_ctpop_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -6237,7 +6238,7 @@ static void decode_rr_divide(DisasContext *ctx)
case OPC2_32_RR_DIV:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r3);
- GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6246,7 +6247,7 @@ static void decode_rr_divide(DisasContext *ctx)
case OPC2_32_RR_DIV_U:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
CHECK_REG_PAIR(r3);
- GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2]);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6328,69 +6329,69 @@ static void decode_rr1_mul(DisasContext *ctx)
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MUL_H_32_LU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MUL_H_32_UL:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MUL_H_32_UU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
- gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]);
break;
case OPC2_32_RR1_MULM_H_64_LL:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULM_H_64_LU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULM_H_64_UL:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULM_H_64_UU:
temp64 = tcg_temp_new_i64();
CHECK_REG_PAIR(r3);
GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
- tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64);
/* reset V bit */
- tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
/* reset AV bit */
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
break;
case OPC2_32_RR1_MULR_H_16_LL:
GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
@@ -6436,45 +6437,45 @@ static void decode_rr1_mulq(DisasContext *ctx)
break;
case OPC2_32_RR1_MUL_Q_64:
CHECK_REG_PAIR(r3);
- gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, 0);
break;
case OPC2_32_RR1_MUL_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
break;
case OPC2_32_RR1_MUL_Q_64_L:
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n, 0);
break;
case OPC2_32_RR1_MUL_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
break;
case OPC2_32_RR1_MUL_Q_64_U:
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n, 0);
break;
case OPC2_32_RR1_MUL_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RR1_MUL_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RR1_MULR_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RR1_MULR_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
break;
default:
@@ -6498,7 +6499,7 @@ static void decode_rr2_mul(DisasContext *ctx)
break;
case OPC2_32_RR2_MUL_64:
CHECK_REG_PAIR(r3);
- gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR2_MULS_32:
@@ -6507,7 +6508,7 @@ static void decode_rr2_mul(DisasContext *ctx)
break;
case OPC2_32_RR2_MUL_U_64:
CHECK_REG_PAIR(r3);
- gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR2_MULS_U_32:
@@ -6537,16 +6538,16 @@ static void decode_rrpw_extract_insert(DisasContext *ctx)
switch (op2) {
case OPC2_32_RRPW_EXTR:
if (width == 0) {
- tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r3], 0);
} else if (pos + width <= 32) {
- tcg_gen_sextract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
+ tcg_gen_sextract_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
}
break;
case OPC2_32_RRPW_EXTR_U:
if (width == 0) {
- tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r3], 0);
} else {
- tcg_gen_extract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
+ tcg_gen_extract_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
}
break;
case OPC2_32_RRPW_IMASK:
@@ -6554,18 +6555,18 @@ static void decode_rrpw_extract_insert(DisasContext *ctx)
if (pos + width <= 32) {
temp = tcg_temp_new();
- tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos);
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
- tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
+ tcg_gen_movi_i32(temp, ((1u << width) - 1) << pos);
+ tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
+ tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp);
}
break;
case OPC2_32_RRPW_INSERT:
- /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
+ /* tcg_gen_deposit_i32() does not handle the case of width = 0 */
if (width == 0) {
- tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]);
} else if (pos + width <= 32) {
- tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ tcg_gen_deposit_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos, width);
}
break;
@@ -6606,12 +6607,12 @@ static void decode_rrr_cond_select(DisasContext *ctx)
break;
case OPC2_32_RRR_SEL:
temp = tcg_constant_i32(0);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_SELN:
temp = tcg_constant_i32(0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
default:
@@ -6635,49 +6636,49 @@ static void decode_rrr_divide(DisasContext *ctx)
case OPC2_32_RRR_DVADJ:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_DVSTEP:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_DVSTEP_U:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMAX:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMAX_U:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMIN:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_IXMIN_U:
CHECK_REG_PAIR(r3);
CHECK_REG_PAIR(r4);
- GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_PACK:
CHECK_REG_PAIR(r3);
gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]);
break;
case OPC2_32_RRR_CRCN:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
@@ -6725,8 +6726,8 @@ static void decode_rrr2_madd(DisasContext *ctx)
case OPC2_32_RRR2_MADD_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_32:
gen_helper_madd32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6735,14 +6736,14 @@ static void decode_rrr2_madd(DisasContext *ctx)
case OPC2_32_RRR2_MADDS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADD_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_U_32:
gen_helper_madd32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6751,8 +6752,8 @@ static void decode_rrr2_madd(DisasContext *ctx)
case OPC2_32_RRR2_MADDS_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6778,8 +6779,8 @@ static void decode_rrr2_msub(DisasContext *ctx)
case OPC2_32_RRR2_MSUB_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_32:
gen_helper_msub32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6788,14 +6789,14 @@ static void decode_rrr2_msub(DisasContext *ctx)
case OPC2_32_RRR2_MSUBS_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUB_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_U_32:
gen_helper_msub32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
@@ -6804,8 +6805,8 @@ static void decode_rrr2_msub(DisasContext *ctx)
case OPC2_32_RRR2_MSUBS_U_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
- cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6829,98 +6830,98 @@ static void decode_rrr1_madd(DisasContext *ctx)
case OPC2_32_RRR1_MADD_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADD_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADD_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADD_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDM_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDM_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDM_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDM_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDMS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDMS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDMS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDMS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDR_H_LL:
gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -6983,61 +6984,61 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
case OPC2_32_RRR1_MADD_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MADD_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADD_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADD_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADD_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADD_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADD_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADD_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADD_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_32:
gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -7046,90 +7047,90 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
case OPC2_32_RRR1_MADDS_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MADDS_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADDS_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADDS_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MADDS_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MADDS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDS_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDR_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MADDRS_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MADDR_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDR_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDRS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MADDRS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
default:
@@ -7153,109 +7154,109 @@ static void decode_rrr1_maddsu_h(DisasContext *ctx)
case OPC2_32_RRR1_MADDSU_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSU_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSU_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSU_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUS_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSUS_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSUS_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSUS_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUM_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSUM_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSUM_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSUM_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MADDSUMS_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MADDSUR_H_16_LL:
@@ -7311,98 +7312,98 @@ static void decode_rrr1_msub(DisasContext *ctx)
case OPC2_32_RRR1_MSUB_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUB_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUB_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUB_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBM_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBM_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBM_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBM_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBMS_H_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBMS_H_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBMS_H_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBMS_H_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBR_H_LL:
gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -7465,61 +7466,61 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
case OPC2_32_RRR1_MSUB_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MSUB_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUB_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUB_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUB_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUB_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUB_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUB_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUB_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_32:
gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
@@ -7528,90 +7529,90 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
case OPC2_32_RRR1_MSUBS_Q_64:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_L:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUBS_Q_64_L:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
- gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_U:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
temp, n, 16);
break;
case OPC2_32_RRR1_MSUBS_Q_64_U:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
- gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp,
n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
- gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBS_Q_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
- gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], temp, temp2, n);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBR_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MSUBRS_H_64_UL:
CHECK_REG_PAIR(r3);
- gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1],
cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
break;
case OPC2_32_RRR1_MSUBR_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBR_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBRS_Q_32_LL:
- tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
- tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]);
gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
case OPC2_32_RRR1_MSUBRS_Q_32_UU:
- tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
- tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16);
gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
break;
default:
@@ -7635,109 +7636,109 @@ static void decode_rrr1_msubad_h(DisasContext *ctx)
case OPC2_32_RRR1_MSUBAD_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBAD_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBAD_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBAD_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADS_H_32_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBADS_H_32_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBADS_H_32_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBADS_H_32_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADM_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBADM_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBADM_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBADM_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_LL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LL);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_LU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_LU);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_UL:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UL);
break;
case OPC2_32_RRR1_MSUBADMS_H_64_UU:
CHECK_REG_PAIR(r4);
CHECK_REG_PAIR(r3);
- gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2],
n, MODE_UU);
break;
case OPC2_32_RRR1_MSUBADR_H_16_LL:
@@ -7795,43 +7796,43 @@ static void decode_rrrr_extract_insert(DisasContext *ctx)
switch (op2) {
case OPC2_32_RRRR_DEXTR:
- tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f);
if (r1 == r2) {
- tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_rotl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
} else {
TCGv msw = tcg_temp_new();
- TCGv zero = tcg_constant_tl(0);
- tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
- tcg_gen_subfi_tl(msw, 32, tmp_pos);
- tcg_gen_shr_tl(msw, cpu_gpr_d[r2], msw);
+ TCGv zero = tcg_constant_i32(0);
+ tcg_gen_shl_i32(tmp_width, cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_i32(msw, 32, tmp_pos);
+ tcg_gen_shr_i32(msw, cpu_gpr_d[r2], msw);
/*
* if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
* behaviour. So check that case here and set the low bits to zero
* which effectivly returns cpu_gpr_d[r1]
*/
- tcg_gen_movcond_tl(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw);
- tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, msw);
+ tcg_gen_movcond_i32(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw);
+ tcg_gen_or_i32(cpu_gpr_d[r4], tmp_width, msw);
}
break;
case OPC2_32_RRRR_EXTR:
case OPC2_32_RRRR_EXTR_U:
CHECK_REG_PAIR(r3);
- tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
- tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
- tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
- tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
- tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
- tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
+ tcg_gen_andi_i32(tmp_width, cpu_gpr_d[r3 + 1], 0x1f);
+ tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_add_i32(tmp_pos, tmp_pos, tmp_width);
+ tcg_gen_subfi_i32(tmp_pos, 32, tmp_pos);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_i32(tmp_width, 32, tmp_width);
if (op2 == OPC2_32_RRRR_EXTR) {
- tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ tcg_gen_sar_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
} else {
- tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ tcg_gen_shr_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
}
break;
case OPC2_32_RRRR_INSERT:
CHECK_REG_PAIR(r3);
- tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
- tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_andi_i32(tmp_width, cpu_gpr_d[r3 + 1], 0x1f);
+ tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f);
gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
tmp_pos);
break;
@@ -7860,35 +7861,35 @@ static void decode_rrrw_extract_insert(DisasContext *ctx)
switch (op2) {
case OPC2_32_RRRW_EXTR:
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_addi_tl(temp, temp, width);
- tcg_gen_subfi_tl(temp, 32, temp);
- tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
- tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_addi_i32(temp, temp, width);
+ tcg_gen_subfi_i32(temp, 32, temp);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_sari_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
break;
case OPC2_32_RRRW_EXTR_U:
if (width == 0) {
- tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
+ tcg_gen_movi_i32(cpu_gpr_d[r4], 0);
} else {
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
- tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_shr_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_andi_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32 - width));
}
break;
case OPC2_32_RRRW_IMASK:
temp2 = tcg_temp_new();
CHECK_REG_PAIR(r4);
- tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
- tcg_gen_movi_tl(temp2, (1 << width) - 1);
- tcg_gen_shl_tl(temp2, temp2, temp);
- tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
- tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);
+ tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp2, (1 << width) - 1);
+ tcg_gen_shl_i32(temp2, temp2, temp);
+ tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
+ tcg_gen_mov_i32(cpu_gpr_d[r4 + 1], temp2);
break;
case OPC2_32_RRRW_INSERT:
temp2 = tcg_temp_new();
- tcg_gen_movi_tl(temp, width);
- tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_i32(temp, width);
+ tcg_gen_andi_i32(temp2, cpu_gpr_d[r3], 0x1f);
gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);
break;
default:
@@ -7913,7 +7914,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_DISABLE:
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
+ tcg_gen_andi_i32(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
} else {
generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
}
@@ -7921,9 +7922,9 @@ static void decode_sys_interrupts(DisasContext *ctx)
case OPC2_32_SYS_DISABLE_D:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_extract_tl(cpu_gpr_d[r1], cpu_ICR,
+ tcg_gen_extract_i32(cpu_gpr_d[r1], cpu_ICR,
ctx->icr_ie_offset, 1);
- tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
+ tcg_gen_andi_i32(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
} else {
generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
}
@@ -7934,7 +7935,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_ENABLE:
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_ori_tl(cpu_ICR, cpu_ICR, ctx->icr_ie_mask);
+ tcg_gen_ori_i32(cpu_ICR, cpu_ICR, ctx->icr_ie_mask);
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
} else {
generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
@@ -7960,8 +7961,8 @@ static void decode_sys_interrupts(DisasContext *ctx)
l1 = gen_new_label();
tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
- tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
- tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
+ tcg_gen_andi_i32(tmp, tmp, MASK_DBGSR_DE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 1, l1);
gen_helper_rfm(tcg_env);
gen_set_label(l1);
ctx->base.is_jmp = DISAS_EXIT;
@@ -7978,7 +7979,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
case OPC2_32_SYS_RESTORE:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
- tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1],
+ tcg_gen_deposit_i32(cpu_ICR, cpu_ICR, cpu_gpr_d[r1],
ctx->icr_ie_offset, 1);
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
} else {
@@ -7990,13 +7991,13 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_TRAPSV:
l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
+ tcg_gen_brcondi_i32(TCG_COND_GE, cpu_PSW_SV, 0, l1);
generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
gen_set_label(l1);
break;
case OPC2_32_SYS_TRAPV:
l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
+ tcg_gen_brcondi_i32(TCG_COND_GE, cpu_PSW_V, 0, l1);
generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
gen_set_label(l1);
break;
@@ -8047,16 +8048,16 @@ static void decode_32Bit_opc(DisasContext *ctx)
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
temp2 = tcg_temp_new();
- tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
- tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shri_i32(temp2, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_i32(temp2, temp, ctx->mem_idx, MO_LEUW);
break;
case OPC1_32_ABS_LD_Q:
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
- tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
- tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
break;
case OPCM_32_ABS_LEA_LHA:
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -8065,13 +8066,13 @@ static void decode_32Bit_opc(DisasContext *ctx)
if (has_feature(ctx, TRICORE_FEATURE_162)) {
op2 = MASK_OP_ABS_OP2(ctx->opcode);
if (op2 == OPC2_32_ABS_LHA) {
- tcg_gen_movi_tl(cpu_gpr_a[r1], address << 14);
+ tcg_gen_movi_i32(cpu_gpr_a[r1], address << 14);
break;
}
/* otherwise translate regular LEA */
}
- tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
+ tcg_gen_movi_i32(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
break;
/* ABSB-format */
case OPC1_32_ABSB_ST_T:
@@ -8082,10 +8083,10 @@ static void decode_32Bit_opc(DisasContext *ctx)
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
temp2 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
- tcg_gen_ori_tl(temp2, temp2, (b << bpos));
- tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
+ tcg_gen_qemu_ld_i32(temp2, temp, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_i32(temp2, temp2, ~(0x1u << bpos));
+ tcg_gen_ori_i32(temp2, temp2, (b << bpos));
+ tcg_gen_qemu_st_i32(temp2, temp, ctx->mem_idx, MO_UB);
break;
/* B-format */
case OPC1_32_B_CALL:
@@ -8212,8 +8213,8 @@ static void decode_32Bit_opc(DisasContext *ctx)
CHECK_REG_PAIR(r2);
- tcg_gen_andi_tl(temp2, cpu_gpr_d[r2 + 1], 0x1f);
- tcg_gen_andi_tl(temp3, cpu_gpr_d[r2], 0x1f);
+ tcg_gen_andi_i32(temp2, cpu_gpr_d[r2 + 1], 0x1f);
+ tcg_gen_andi_i32(temp3, cpu_gpr_d[r2], 0x1f);
gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, temp2, temp3);
break;
@@ -8281,7 +8282,7 @@ static void decode_32Bit_opc(DisasContext *ctx)
r3 = MASK_OP_RRPW_D(ctx->opcode);
const16 = MASK_OP_RRPW_POS(ctx->opcode);
- tcg_gen_extract2_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
+ tcg_gen_extract2_i32(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
32 - const16);
break;
/* RRR Format */
@@ -8330,10 +8331,10 @@ static void decode_32Bit_opc(DisasContext *ctx)
decode_sys_interrupts(ctx);
break;
case OPC1_32_SYS_RSTV:
- tcg_gen_movi_tl(cpu_PSW_V, 0);
- tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
- tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
- tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
+ tcg_gen_movi_i32(cpu_PSW_V, 0);
+ tcg_gen_mov_i32(cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_i32(cpu_PSW_SAV, cpu_PSW_V);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
--
2.51.0
* [PULL 68/75] target/tricore: Pass DisasContext as first argument
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (66 preceding siblings ...)
2025-10-16 12:15 ` [PULL 67/75] target/tricore: Expand TCG helpers for 32-bit target Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 69/75] target/tricore: Un-inline various helpers Philippe Mathieu-Daudé
` (6 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Unify style: always pass DisasContext as the first argument.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-11-philmd@linaro.org>
---
target/tricore/translate.c | 72 ++++++++++++++++++++------------------
1 file changed, 38 insertions(+), 34 deletions(-)
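In short, every generator helper that previously took the DisasContext as its last parameter now takes it first, matching the decode_*() functions. A minimal sketch of the pattern, with the signatures lifted from the hunks below (TCGv and DisasContext come from the TCG/TriCore translator headers, so this is an excerpt rather than a standalone compilable unit):

    /* before: DisasContext passed last */
    static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx);

    /* after: DisasContext passed first, like the rest of the translator */
    static void gen_st_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address);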
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index ef3653b8820..62a2d807266 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -226,7 +226,7 @@ static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop);
}
-static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+static void gen_st_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -234,15 +234,15 @@ static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ);
}
-static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
- DisasContext *ctx)
+static void gen_offset_st_2regs(DisasContext *ctx,
+ TCGv rh, TCGv rl, TCGv base, int16_t con)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_i32(temp, base, con);
- gen_st_2regs_64(rh, rl, temp, ctx);
+ gen_st_2regs_64(ctx, rh, rl, temp);
}
-static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+static void gen_ld_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -251,12 +251,12 @@ static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
- DisasContext *ctx)
+static void gen_offset_ld_2regs(DisasContext *ctx,
+ TCGv rh, TCGv rl, TCGv base, int16_t con)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_i32(temp, base, con);
- gen_ld_2regs_64(rh, rl, temp, ctx);
+ gen_ld_2regs_64(ctx, rh, rl, temp);
}
static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
@@ -3798,11 +3798,11 @@ static void decode_abs_ldw(DisasContext *ctx)
break;
case OPC2_32_ABS_LD_D:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
break;
case OPC2_32_ABS_LD_DA:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
break;
case OPC2_32_ABS_LD_W:
tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
@@ -3913,11 +3913,11 @@ static void decode_abs_store(DisasContext *ctx)
break;
case OPC2_32_ABS_ST_D:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
break;
case OPC2_32_ABS_ST_DA:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
break;
case OPC2_32_ABS_ST_W:
tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
@@ -4289,36 +4289,38 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
break;
case OPC2_32_BO_ST_D_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_st_2regs(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_st_2regs(ctx,
+ cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_ST_D_POSTINC:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2]);
tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_D_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
- gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_ST_DA_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_st_2regs(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_st_2regs(ctx,
+ cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_ST_DA_POSTINC:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_DA_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
- gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_ST_H_SHORTOFF:
@@ -4413,7 +4415,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx)
break;
case OPC2_32_BO_ST_D_BR:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2);
gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_D_CIRC:
@@ -4428,7 +4430,7 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx)
break;
case OPC2_32_BO_ST_DA_BR:
CHECK_REG_PAIR(r1);
- gen_st_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2, ctx);
+ gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2);
gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_ST_DA_CIRC:
@@ -4520,36 +4522,38 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
break;
case OPC2_32_BO_LD_D_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_ld_2regs(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_ld_2regs(ctx,
+ cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_LD_D_POSTINC:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2]);
tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_D_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
- gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_LD_DA_SHORTOFF:
CHECK_REG_PAIR(r1);
- gen_offset_ld_2regs(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
- off10, ctx);
+ gen_offset_ld_2regs(ctx,
+ cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10);
break;
case OPC2_32_BO_LD_DA_POSTINC:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_LD_DA_PREINC:
CHECK_REG_PAIR(r1);
temp = tcg_temp_new();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
- gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
break;
case OPC2_32_BO_LD_H_SHORTOFF:
@@ -4651,7 +4655,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx)
break;
case OPC2_32_BO_LD_D_BR:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2);
gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_D_CIRC:
@@ -4666,7 +4670,7 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx)
break;
case OPC2_32_BO_LD_DA_BR:
CHECK_REG_PAIR(r1);
- gen_ld_2regs_64(cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2, ctx);
+ gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2);
gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]);
break;
case OPC2_32_BO_LD_DA_CIRC:
@@ -8377,7 +8381,7 @@ static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(ctx->base.pc_next);
}
-static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx)
+static bool insn_crosses_page(DisasContext *ctx, CPUTriCoreState *env)
{
/*
* Return true if the insn at ctx->base.pc_next might cross a page boundary.
@@ -8420,7 +8424,7 @@ static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
|| (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3
- && insn_crosses_page(env, ctx))) {
+ && insn_crosses_page(ctx, env))) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
--
2.51.0
* [PULL 69/75] target/tricore: Un-inline various helpers
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (67 preceding siblings ...)
2025-10-16 12:15 ` [PULL 68/75] target/tricore: Pass DisasContext as first argument Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 70/75] target/tricore: Expand TCGv type for 32-bit target Philippe Mathieu-Daudé
` (5 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Drop the 'inline' hints and rely on the linker to optimize at link time.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-12-philmd@linaro.org>
---
target/tricore/translate.c | 274 ++++++++++++++++++-------------------
1 file changed, 135 insertions(+), 139 deletions(-)
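The change is mechanical: the 'inline' qualifier is dropped from the static helpers, leaving inlining decisions to the compiler and, with link-time optimization, the linker. A minimal sketch of the pattern, with a signature taken from one of the hunks below (TCGv comes from the TCG headers, so this is an excerpt rather than a standalone compilable unit):

    /* before: explicit inline hint */
    static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2);

    /* after: plain static; inlining is left to the toolchain */
    static void gen_add_d(TCGv ret, TCGv r1, TCGv r2);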
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 62a2d807266..787c77292fd 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -201,7 +201,7 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
/* For two 32-bit registers used a 64-bit register, the first
registernumber needs to be even. Otherwise we trap. */
-static inline void generate_trap(DisasContext *ctx, int class, int tin);
+static void generate_trap(DisasContext *ctx, int class, int tin);
#define CHECK_REG_PAIR(reg) do { \
if (reg & 0x1) { \
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
@@ -210,16 +210,16 @@ static inline void generate_trap(DisasContext *ctx, int class, int tin);
/* Functions for load/save to/from memory */
-static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
- int16_t con, MemOp mop)
+static void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
+ int16_t con, MemOp mop)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_i32(temp, r2, con);
tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop);
}
-static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
- int16_t con, MemOp mop)
+static void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
+ int16_t con, MemOp mop)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_i32(temp, r2, con);
@@ -346,7 +346,7 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
-static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
+static void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
{
/* since we're caching PSW make this a special case */
if (offset == 0xfe04) {
@@ -374,8 +374,7 @@ static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
watchdog device, we handle endinit protected registers like
all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
-static inline void gen_mtcr(DisasContext *ctx, TCGv r1,
- int32_t offset)
+static void gen_mtcr(DisasContext *ctx, TCGv r1, int32_t offset)
{
if (ctx->priv == TRICORE_PRIV_SM) {
/* since we're caching PSW make this a special case */
@@ -394,7 +393,7 @@ static inline void gen_mtcr(DisasContext *ctx, TCGv r1,
/* Functions for arithmetic instructions */
-static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
+static void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
@@ -415,7 +414,7 @@ static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static inline void
+static void
gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
TCGv temp = tcg_temp_new();
@@ -441,7 +440,7 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_mov_i64(ret, result);
}
-static inline void
+static void
gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
void(*op2)(TCGv, TCGv, TCGv))
@@ -488,7 +487,7 @@ gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
/* ret = r2 + (r1 * r3); */
-static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+static void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -519,13 +518,13 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_madd32_d(ret, r1, r2, temp);
}
-static inline void
+static void
gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -553,7 +552,7 @@ gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_mov_i32(ret_high, t4);
}
-static inline void
+static void
gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -583,7 +582,7 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
+static void
gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -591,7 +590,7 @@ gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
+static void
gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -599,7 +598,7 @@ gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
+static void
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -626,7 +625,7 @@ gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_add_tl, tcg_gen_add_tl);
}
-static inline void
+static void
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -653,7 +652,7 @@ gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_sub_tl, tcg_gen_add_tl);
}
-static inline void
+static void
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -686,9 +685,9 @@ gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
-static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
+static void gen_adds(TCGv ret, TCGv r1, TCGv r2);
-static inline void
+static void
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -723,9 +722,9 @@ gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
+static void gen_subs(TCGv ret, TCGv r1, TCGv r2);
-static inline void
+static void
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -760,7 +759,7 @@ gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void
+static void
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -793,7 +792,7 @@ gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
-static inline void
+static void
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -821,7 +820,7 @@ gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
-static inline void
+static void
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -847,7 +846,7 @@ gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
+static void
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
uint32_t mode)
{
@@ -870,7 +869,7 @@ gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
gen_helper_addr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
+static void
gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv temp = tcg_temp_new();
@@ -881,7 +880,7 @@ gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
+static void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv t_n = tcg_constant_i32(n);
@@ -908,7 +907,7 @@ gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
}
-static inline void
+static void
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
uint32_t n, uint32_t mode)
{
@@ -931,7 +930,7 @@ gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
gen_helper_addr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
+static void
gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv temp = tcg_temp_new();
@@ -942,7 +941,7 @@ gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
+static void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv t_n = tcg_constant_i32(n);
@@ -968,21 +967,21 @@ gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_helper_addsur_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
-static inline void
+static void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv t_n = tcg_constant_i32(n);
gen_helper_maddr_q(ret, tcg_env, r1, r2, r3, t_n);
}
-static inline void
+static void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv t_n = tcg_constant_i32(n);
gen_helper_maddr_q_ssov(ret, tcg_env, r1, r2, r3, t_n);
}
-static inline void
+static void
gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
uint32_t up_shift)
{
@@ -1032,7 +1031,7 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_mov_i32(ret, temp3);
}
-static inline void
+static void
gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
TCGv temp = tcg_temp_new();
@@ -1049,7 +1048,7 @@ gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_add_d(ret, arg1, temp);
}
-static inline void
+static void
gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
TCGv temp = tcg_temp_new();
@@ -1066,7 +1065,7 @@ gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_adds(ret, arg1, temp);
}
-static inline void
+static void
gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1093,7 +1092,7 @@ gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t3);
}
-static inline void
+static void
gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1119,7 +1118,7 @@ gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t1);
}
-static inline void
+static void
gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1167,7 +1166,7 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
+static void
gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
uint32_t up_shift)
{
@@ -1185,7 +1184,7 @@ gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
gen_helper_madd32_q_add_ssov(ret, tcg_env, t1, t2);
}
-static inline void
+static void
gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1198,7 +1197,7 @@ gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
}
/* ret = r2 - (r1 * r3); */
-static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+static void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1230,13 +1229,13 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_msub32_d(ret, r1, r2, temp);
}
-static inline void
+static void
gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -1264,7 +1263,7 @@ gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_mov_i32(ret_high, t4);
}
-static inline void
+static void
gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -1272,7 +1271,7 @@ gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
+static void
gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -1300,7 +1299,7 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
+static void
gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -1308,14 +1307,14 @@ gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void gen_addi_d(TCGv ret, TCGv r1, int32_t r2)
+static void gen_addi_d(TCGv ret, TCGv r1, int32_t r2)
{
TCGv temp = tcg_constant_i32(r2);
gen_add_d(ret, r1, temp);
}
/* calculate the carry bit too */
-static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
{
TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
@@ -1338,13 +1337,13 @@ static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
+static void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_add_CC(ret, r1, temp);
}
-static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
@@ -1366,14 +1365,13 @@ static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
+static void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_addc_CC(ret, r1, temp);
}
-static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
- TCGv r4)
+static void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4)
{
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
@@ -1405,14 +1403,13 @@ static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
-static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
- TCGv r3, TCGv r4)
+static void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, TCGv r3, TCGv r4)
{
TCGv temp = tcg_constant_i32(r2);
gen_cond_add(cond, r1, temp, r3, r4);
}
-static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
+static void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
TCGv temp = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
@@ -1433,7 +1430,7 @@ static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static inline void
+static void
gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
TCGv temp = tcg_temp_new();
@@ -1459,7 +1456,7 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_mov_i64(ret, result);
}
-static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
{
TCGv result = tcg_temp_new();
TCGv temp = tcg_temp_new();
@@ -1482,15 +1479,14 @@ static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
{
TCGv temp = tcg_temp_new();
tcg_gen_not_i32(temp, r2);
gen_addc_CC(ret, r1, temp);
}
-static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
- TCGv r4)
+static void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4)
{
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
@@ -1522,7 +1518,7 @@ static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
-static inline void
+static void
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -1549,7 +1545,7 @@ gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_sub_tl, tcg_gen_sub_tl);
}
-static inline void
+static void
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -1584,7 +1580,7 @@ gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void
+static void
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -1612,7 +1608,7 @@ gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
-static inline void
+static void
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -1638,7 +1634,7 @@ gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
+static void
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
uint32_t mode)
{
@@ -1661,7 +1657,7 @@ gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
gen_helper_subr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
+static void
gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv temp = tcg_temp_new();
@@ -1672,7 +1668,7 @@ gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
+static void
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
uint32_t n, uint32_t mode)
{
@@ -1695,7 +1691,7 @@ gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
gen_helper_subr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
-static inline void
+static void
gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv temp = tcg_temp_new();
@@ -1706,21 +1702,21 @@ gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
-static inline void
+static void
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv temp = tcg_constant_i32(n);
gen_helper_msubr_q(ret, tcg_env, r1, r2, r3, temp);
}
-static inline void
+static void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv temp = tcg_constant_i32(n);
gen_helper_msubr_q_ssov(ret, tcg_env, r1, r2, r3, temp);
}
-static inline void
+static void
gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
uint32_t up_shift)
{
@@ -1761,7 +1757,7 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_mov_i32(ret, temp3);
}
-static inline void
+static void
gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
TCGv temp = tcg_temp_new();
@@ -1778,7 +1774,7 @@ gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_sub_d(ret, arg1, temp);
}
-static inline void
+static void
gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
TCGv temp = tcg_temp_new();
@@ -1795,7 +1791,7 @@ gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_subs(ret, arg1, temp);
}
-static inline void
+static void
gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1822,7 +1818,7 @@ gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t3);
}
-static inline void
+static void
gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1848,7 +1844,7 @@ gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t1);
}
-static inline void
+static void
gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1896,7 +1892,7 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void
+static void
gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
uint32_t up_shift)
{
@@ -1919,7 +1915,7 @@ gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
gen_helper_msub32_q_sub_ssov(ret, tcg_env, t1, t3);
}
-static inline void
+static void
gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
TCGv arg3, uint32_t n)
{
@@ -1931,7 +1927,7 @@ gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, r1);
}
-static inline void
+static void
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -1958,7 +1954,7 @@ gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_add_tl, tcg_gen_sub_tl);
}
-static inline void
+static void
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -1991,7 +1987,7 @@ gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
-static inline void
+static void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv t_n = tcg_constant_i32(n);
@@ -2017,7 +2013,7 @@ gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_helper_subadr_h(ret, tcg_env, temp64, temp, temp2);
}
-static inline void
+static void
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -2052,7 +2048,7 @@ gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static inline void
+static void
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
@@ -2084,7 +2080,7 @@ gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
+static void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
TCGv t_n = tcg_constant_i32(n);
@@ -2110,7 +2106,7 @@ gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_helper_subadr_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
-static inline void gen_abs(TCGv ret, TCGv r1)
+static void gen_abs(TCGv ret, TCGv r1)
{
tcg_gen_abs_i32(ret, r1);
/* overflow can only happen, if r1 = 0x80000000 */
@@ -2125,7 +2121,7 @@ static inline void gen_abs(TCGv ret, TCGv r1)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
+static void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
TCGv temp = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
@@ -2151,19 +2147,19 @@ static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
+static void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_absdif(ret, r1, temp);
}
-static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
+static void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_absdif_ssov(ret, tcg_env, r1, temp);
}
-static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
+static void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
TCGv high = tcg_temp_new();
TCGv low = tcg_temp_new();
@@ -2183,13 +2179,13 @@ static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
+static void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_mul_i32s(ret, r1, temp);
}
-static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+static void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
tcg_gen_muls2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
@@ -2203,14 +2199,14 @@ static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
+static void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_mul_i64s(ret_low, ret_high, r1, temp);
}
-static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+static void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
tcg_gen_mulu2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
@@ -2224,33 +2220,33 @@ static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
+static void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_mul_i64u(ret_low, ret_high, r1, temp);
}
-static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
+static void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_mul_ssov(ret, tcg_env, r1, temp);
}
-static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
+static void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_mul_suov(ret, tcg_env, r1, temp);
}
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
-static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_madd32_ssov(ret, tcg_env, r1, r2, temp);
}
-static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_madd32_suov(ret, tcg_env, r1, r2, temp);
@@ -2361,7 +2357,7 @@ static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
tcg_gen_andi_i32(ret, ret, 0xffff0000);
}
-static inline void
+static void
gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -2371,7 +2367,7 @@ gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
+static void
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -2379,7 +2375,7 @@ gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
+static void
gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -2389,7 +2385,7 @@ gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
+static void
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -2397,19 +2393,19 @@ gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_msub32_ssov(ret, tcg_env, r1, r2, temp);
}
-static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_msub32_suov(ret, tcg_env, r1, r2, temp);
}
-static inline void
+static void
gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -2419,7 +2415,7 @@ gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
+static void
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -2427,7 +2423,7 @@ gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static inline void
+static void
gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
TCGv r3)
{
@@ -2437,7 +2433,7 @@ gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static inline void
+static void
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
@@ -2589,37 +2585,37 @@ static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
gen_sh_cond(cond, ret, r1, temp);
}
-static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
+static void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
gen_helper_add_ssov(ret, tcg_env, r1, r2);
}
-static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
+static void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_add_ssov(ret, tcg_env, r1, temp);
}
-static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
+static void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
gen_helper_add_suov(ret, tcg_env, r1, temp);
}
-static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
gen_helper_sub_ssov(ret, tcg_env, r1, r2);
}
-static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
gen_helper_sub_suov(ret, tcg_env, r1, r2);
}
-static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
- int pos1, int pos2,
- void(*op1)(TCGv, TCGv, TCGv),
- void(*op2)(TCGv, TCGv, TCGv))
+static void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv, TCGv, TCGv),
+ void(*op2)(TCGv, TCGv, TCGv))
{
TCGv temp1, temp2;
@@ -2636,9 +2632,9 @@ static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
}
/* ret = r1[pos1] op1 r2[pos2]; */
-static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
- int pos1, int pos2,
- void(*op1)(TCGv, TCGv, TCGv))
+static void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv, TCGv, TCGv))
{
TCGv temp1, temp2;
@@ -2653,8 +2649,8 @@ static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
tcg_gen_andi_i32(ret, ret, 0x1);
}
-static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
- void(*op)(TCGv, TCGv, TCGv))
+static void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
+ void(*op)(TCGv, TCGv, TCGv))
{
TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
@@ -2668,7 +2664,7 @@ static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
tcg_gen_deposit_i32(ret, ret, temp, 0, 1);
}
-static inline void
+static void
gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
void(*op)(TCGv, TCGv, TCGv))
{
@@ -2676,7 +2672,7 @@ gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
gen_accumulating_cond(cond, ret, r1, temp, op);
}
-static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
+static void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
{
TCGv b0 = tcg_temp_new();
TCGv b1 = tcg_temp_new();
@@ -2705,7 +2701,7 @@ static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
tcg_gen_or_i32(ret, ret, b3);
}
-static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
+static void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
{
TCGv h0 = tcg_temp_new();
TCGv h1 = tcg_temp_new();
@@ -2724,7 +2720,7 @@ static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
/* mask = ((1 << width) -1) << pos;
ret = (r1 & ~mask) | (r2 << pos) & mask); */
-static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
+static void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
{
TCGv mask = tcg_temp_new();
TCGv temp = tcg_temp_new();
@@ -2740,7 +2736,7 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
tcg_gen_or_i32(ret, temp, temp2);
}
-static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
+static void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -2748,7 +2744,7 @@ static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
+static void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -2756,7 +2752,7 @@ static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static inline void
+static void
gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
TCGv_i64 ret = tcg_temp_new_i64();
@@ -2769,7 +2765,7 @@ gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
tcg_gen_extr_i64_i32(rl, rh, ret);
}
-static inline void
+static void
gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
TCGv_i64 ret = tcg_temp_new_i64();
@@ -2812,7 +2808,7 @@ static void gen_calc_usb_mulr_h(TCGv arg)
/* helpers for generating program flow micro-ops */
-static inline void gen_save_pc(vaddr pc)
+static void gen_save_pc(vaddr pc)
{
tcg_gen_movi_i32(cpu_PC, pc);
}
@@ -2840,8 +2836,8 @@ static void generate_trap(DisasContext *ctx, int class, int tin)
ctx->base.is_jmp = DISAS_NORETURN;
}
-static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
- TCGv r2, int16_t address)
+static void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
+ TCGv r2, int16_t address)
{
TCGLabel *jumpLabel = gen_new_label();
tcg_gen_brcond_i32(cond, r1, r2, jumpLabel);
@@ -2852,8 +2848,8 @@ static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2);
}
-static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
- int r2, int16_t address)
+static void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
+ int r2, int16_t address)
{
TCGv temp = tcg_constant_i32(r2);
gen_branch_cond(ctx, cond, r1, temp, address);
--
2.51.0
* [PULL 70/75] target/tricore: Expand TCGv type for 32-bit target
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (68 preceding siblings ...)
2025-10-16 12:15 ` [PULL 69/75] target/tricore: Un-inline various helpers Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 71/75] target/xtensa: Replace legacy cpu_physical_memory_[un]map() calls Philippe Mathieu-Daudé
` (4 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The TriCore target is only built as 32-bit:
$ git grep TARGET_LONG_BITS configs/targets/tricore-*
configs/targets/tricore-softmmu.mak:2:TARGET_LONG_BITS=32
Replace:
TCGv -> TCGv_i32
tcg_temp_new -> tcg_temp_new_i32
This is a mechanical replacement, with the style adapted
where needed to pass the checkpatch.pl script.
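For illustration only, a minimal before/after sketch of the substitution
on a hypothetical helper (gen_example is not part of this patch, it just
mirrors the pattern being rewritten):

    /* before: hypothetical helper using the target-sized aliases */
    static void gen_example(TCGv ret, TCGv r1, TCGv r2)
    {
        TCGv tmp = tcg_temp_new();

        tcg_gen_add_i32(tmp, r1, r2);
        tcg_gen_mov_i32(ret, tmp);
    }

    /* after: same helper spelled with the explicit 32-bit types */
    static void gen_example(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
    {
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_add_i32(tmp, r1, r2);
        tcg_gen_mov_i32(ret, tmp);
    }

Since TARGET_LONG_BITS is 32 for TriCore, TCGv is already an alias for
TCGv_i32 and tcg_temp_new() for tcg_temp_new_i32(), so the generated TCG
ops are unchanged; only the source spelling becomes explicit.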
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251010052141.42460-13-philmd@linaro.org>
---
target/tricore/translate.c | 1248 +++++++++++++++++++-----------------
1 file changed, 642 insertions(+), 606 deletions(-)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 787c77292fd..fbe05a93a8a 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -136,9 +136,9 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
} while (0)
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
tcg_gen_sari_i32(arg00, arg0, 16); \
tcg_gen_ext16s_i32(arg01, arg0); \
tcg_gen_ext16s_i32(arg11, arg1); \
@@ -146,10 +146,10 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
} while (0)
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg10 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg10 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
tcg_gen_sari_i32(arg00, arg0, 16); \
tcg_gen_ext16s_i32(arg01, arg0); \
tcg_gen_sari_i32(arg11, arg1, 16); \
@@ -158,10 +158,10 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
} while (0)
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg10 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg10 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
tcg_gen_sari_i32(arg00, arg0, 16); \
tcg_gen_ext16s_i32(arg01, arg0); \
tcg_gen_sari_i32(arg10, arg1, 16); \
@@ -170,9 +170,9 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
} while (0)
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
- TCGv arg00 = tcg_temp_new(); \
- TCGv arg01 = tcg_temp_new(); \
- TCGv arg11 = tcg_temp_new(); \
+ TCGv_i32 arg00 = tcg_temp_new_i32(); \
+ TCGv_i32 arg01 = tcg_temp_new_i32(); \
+ TCGv_i32 arg11 = tcg_temp_new_i32(); \
tcg_gen_sari_i32(arg01, arg0, 16); \
tcg_gen_ext16s_i32(arg00, arg0); \
tcg_gen_sari_i32(arg11, arg1, 16); \
@@ -210,23 +210,24 @@ static void generate_trap(DisasContext *ctx, int class, int tin);
/* Functions for load/save to/from memory */
-static void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
+static void gen_offset_ld(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
int16_t con, MemOp mop)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, r2, con);
tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop);
}
-static void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
+static void gen_offset_st(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
int16_t con, MemOp mop)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, r2, con);
tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop);
}
-static void gen_st_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address)
+static void gen_st_2regs_64(DisasContext *ctx, TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 address)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -235,14 +236,16 @@ static void gen_st_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address)
}
static void gen_offset_st_2regs(DisasContext *ctx,
- TCGv rh, TCGv rl, TCGv base, int16_t con)
+ TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 base, int16_t con)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, base, con);
gen_st_2regs_64(ctx, rh, rl, temp);
}
-static void gen_ld_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address)
+static void gen_ld_2regs_64(DisasContext *ctx, TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 address)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -252,36 +255,37 @@ static void gen_ld_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address)
}
static void gen_offset_ld_2regs(DisasContext *ctx,
- TCGv rh, TCGv rl, TCGv base, int16_t con)
+ TCGv_i32 rh, TCGv_i32 rl,
+ TCGv_i32 base, int16_t con)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, base, con);
gen_ld_2regs_64(ctx, rh, rl, temp);
}
-static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
- MemOp mop)
+static void gen_st_preincr(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
+ int16_t off, MemOp mop)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, r2, off);
tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop);
tcg_gen_mov_i32(r2, temp);
}
-static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
- MemOp mop)
+static void gen_ld_preincr(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2,
+ int16_t off, MemOp mop)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, r2, off);
tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop);
tcg_gen_mov_i32(r2, temp);
}
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
-static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
+static void gen_ldmst(DisasContext *ctx, int ereg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(ereg);
/* temp = (M(EA, word) */
@@ -299,19 +303,19 @@ static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
/* tmp = M(EA, word);
M(EA, word) = D[a];
D[a] = tmp[31:0];*/
-static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
+static void gen_swap(DisasContext *ctx, int reg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
tcg_gen_qemu_st_i32(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
-static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
+static void gen_cmpswap(DisasContext *ctx, int reg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(reg);
tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
tcg_gen_movcond_i32(TCG_COND_EQ, temp2, cpu_gpr_d[reg + 1], temp,
@@ -320,11 +324,11 @@ static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
tcg_gen_mov_i32(cpu_gpr_d[reg], temp);
}
-static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
+static void gen_swapmsk(DisasContext *ctx, int reg, TCGv_i32 ea)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
CHECK_REG_PAIR(reg);
tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL);
tcg_gen_and_i32(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg + 1]);
@@ -346,7 +350,7 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
-static void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
+static void gen_mfcr(DisasContext *ctx, TCGv_i32 ret, int32_t offset)
{
/* since we're caching PSW make this a special case */
if (offset == 0xfe04) {
@@ -374,7 +378,7 @@ static void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
watchdog device, we handle endinit protected registers like
all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
-static void gen_mtcr(DisasContext *ctx, TCGv r1, int32_t offset)
+static void gen_mtcr(DisasContext *ctx, TCGv_i32 r1, int32_t offset)
{
if (ctx->priv == TRICORE_PRIV_SM) {
/* since we're caching PSW make this a special case */
@@ -393,10 +397,10 @@ static void gen_mtcr(DisasContext *ctx, TCGv r1, int32_t offset)
/* Functions for arithmetic instructions */
-static void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
+static void gen_add_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv t0 = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
/* Addition and set V/SV bits */
tcg_gen_add_i32(result, r1, r2);
/* calc V bit */
@@ -414,10 +418,9 @@ static void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static void
-gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+static void gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 result = tcg_temp_new_i64();
@@ -440,15 +443,16 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_mov_i64(ret, result);
}
-static void
-gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
- void(*op2)(TCGv, TCGv, TCGv))
+static void gen_addsub64_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32),
+ void(*op2)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
- TCGv temp4 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
+ TCGv_i32 temp4 = tcg_temp_new_i32();
(*op1)(temp, r1_low, r2);
/* calc V0 bit */
@@ -487,7 +491,7 @@ gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
/* ret = r2 + (r1 * r3); */
-static void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+static void gen_madd32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -518,20 +522,19 @@ static void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddi32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_madd32_d(ret, r1, r2, temp);
}
-static void
-gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_madd64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3)
{
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- TCGv t3 = tcg_temp_new();
- TCGv t4 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ TCGv_i32 t4 = tcg_temp_new_i32();
tcg_gen_muls2_i32(t1, t2, r1, r3);
/* only the add can overflow */
@@ -552,9 +555,8 @@ gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_mov_i32(ret_high, t4);
}
-static void
-gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_maddu64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -582,29 +584,28 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void
-gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddi64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void
-gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddui64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void
-gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_madd_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -625,13 +626,14 @@ gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_add_tl, tcg_gen_add_tl);
}
-static void
-gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsu_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -652,11 +654,12 @@ gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_sub_tl, tcg_gen_add_tl);
}
-static void
-gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsum_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -685,16 +688,17 @@ gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
-static void gen_adds(TCGv ret, TCGv r1, TCGv r2);
+static void gen_adds(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2);
-static void
-gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_madds_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -722,16 +726,17 @@ gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static void gen_subs(TCGv ret, TCGv r1, TCGv r2);
+static void gen_subs(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2);
-static void
-gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsus_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -759,11 +764,12 @@ gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static void
-gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsums_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
@@ -792,11 +798,12 @@ gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
}
-static void
-gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddm_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -820,11 +827,12 @@ gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
-static void
-gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddms_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
@@ -846,11 +854,11 @@ gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static void
-gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
- uint32_t mode)
+static void gen_maddr64_h(TCGv_i32 ret, TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -869,23 +877,25 @@ gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
gen_helper_addr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
-static void
-gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddr32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
tcg_gen_andi_i32(temp2, r1, 0xffff0000);
tcg_gen_shli_i32(temp, r1, 16);
gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
}
-static void
-gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsur32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -907,11 +917,11 @@ gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
}
-static void
-gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
- uint32_t n, uint32_t mode)
+static void gen_maddr64s_h(TCGv_i32 ret, TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -930,23 +940,25 @@ gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
gen_helper_addr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
-static void
-gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddr32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
tcg_gen_andi_i32(temp2, r1, 0xffff0000);
tcg_gen_shli_i32(temp, r1, 16);
gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
-static void
-gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_maddsur32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -967,27 +979,27 @@ gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_helper_addsur_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
-static void
-gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_maddr_q(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
gen_helper_maddr_q(ret, tcg_env, r1, r2, r3, t_n);
}
-static void
-gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_maddrs_q(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
gen_helper_maddr_q_ssov(ret, tcg_env, r1, r2, r3, t_n);
}
-static void
-gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_madd32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1031,11 +1043,12 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_mov_i32(ret, temp3);
}
-static void
-gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16add32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
@@ -1048,11 +1061,12 @@ gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_add_d(ret, arg1, temp);
}
-static void
-gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16adds32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
@@ -1065,12 +1079,12 @@ gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_adds(ret, arg1, temp);
}
-static void
-gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16add64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1092,12 +1106,12 @@ gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t3);
}
-static void
-gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16adds64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1118,15 +1132,15 @@ gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t1);
}
-static void
-gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_madd64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t4 = tcg_temp_new_i64();
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
tcg_gen_ext_i32_i64(t2, arg2);
@@ -1146,8 +1160,8 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
(0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
if (n == 1) {
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
tcg_gen_and_i32(temp, temp, temp2);
@@ -1166,9 +1180,9 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void
-gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_madds32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1184,12 +1198,12 @@ gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
gen_helper_madd32_q_add_ssov(ret, tcg_env, t1, t2);
}
-static void
-gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_madds64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
gen_helper_madd64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
@@ -1197,7 +1211,7 @@ gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
}
/* ret = r2 - (r1 * r3); */
-static void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+static void gen_msub32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1229,20 +1243,20 @@ static void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubi32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msub32_d(ret, r1, r2, temp);
}
-static void
-gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msub64_d(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- TCGv t3 = tcg_temp_new();
- TCGv t4 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ TCGv_i32 t4 = tcg_temp_new_i32();
tcg_gen_muls2_i32(t1, t2, r1, r3);
/* only the sub can overflow */
@@ -1263,17 +1277,16 @@ gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_mov_i32(ret_high, t4);
}
-static void
-gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubi64_d(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void
-gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msubu64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1,
+ TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1299,25 +1312,25 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void
-gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubui64_d(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void gen_addi_d(TCGv ret, TCGv r1, int32_t r2)
+static void gen_addi_d(TCGv_i32 ret, TCGv_i32 r1, int32_t r2)
{
- TCGv temp = tcg_constant_i32(r2);
+ TCGv_i32 temp = tcg_constant_i32(r2);
gen_add_d(ret, r1, temp);
}
/* calculate the carry bit too */
-static void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_add_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv t0 = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
tcg_gen_movi_i32(t0, 0);
/* Addition and set C/V/SV bits */
@@ -1337,16 +1350,16 @@ static void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
+static void gen_addi_CC(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_add_CC(ret, r1, temp);
}
-static void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_addc_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv t0 = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
/* Addition, carry and set C/V/SV bits */
tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C);
@@ -1365,19 +1378,20 @@ static void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
+static void gen_addci_CC(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_addc_CC(ret, r1, temp);
}
-static void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4)
+static void gen_cond_add(TCGCond cond,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, TCGv_i32 r4)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv result = tcg_temp_new();
- TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_constant_i32(0);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
+ TCGv_i32 mask = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
tcg_gen_setcond_i32(cond, mask, r4, t0);
@@ -1403,16 +1417,17 @@ static void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4)
tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
-static void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, TCGv r3, TCGv r4)
+static void gen_condi_add(TCGCond cond,
+ TCGv_i32 r1, int32_t r2, TCGv_i32 r3, TCGv_i32 r4)
{
- TCGv temp = tcg_constant_i32(r2);
+ TCGv_i32 temp = tcg_constant_i32(r2);
gen_cond_add(cond, r1, temp, r3, r4);
}
-static void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
+static void gen_sub_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
tcg_gen_sub_i32(result, r1, r2);
/* calc V bit */
@@ -1430,10 +1445,9 @@ static void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static void
-gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+static void gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 result = tcg_temp_new_i64();
@@ -1456,10 +1470,10 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
tcg_gen_mov_i64(ret, result);
}
-static void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_sub_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv result = tcg_temp_new();
- TCGv temp = tcg_temp_new();
+ TCGv_i32 result = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_sub_i32(result, r1, r2);
/* calc C bit */
@@ -1479,20 +1493,21 @@ static void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subc_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_not_i32(temp, r2);
gen_addc_CC(ret, r1, temp);
}
-static void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4)
+static void gen_cond_sub(TCGCond cond,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, TCGv_i32 r4)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv result = tcg_temp_new();
- TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_constant_i32(0);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
+ TCGv_i32 mask = tcg_temp_new_i32();
+ TCGv_i32 t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
tcg_gen_setcond_i32(cond, mask, r4, t0);
@@ -1518,13 +1533,14 @@ static void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, TCGv r4)
tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1);
}
-static void
-gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msub_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1545,14 +1561,15 @@ gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_sub_tl, tcg_gen_sub_tl);
}
-static void
-gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubs_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -1580,11 +1597,12 @@ gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static void
-gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubm_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -1608,11 +1626,12 @@ gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
-static void
-gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubms_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
@@ -1634,11 +1653,12 @@ gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static void
-gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
- uint32_t mode)
+static void gen_msubr64_h(TCGv_i32 ret,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1657,22 +1677,24 @@ gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
gen_helper_subr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
-static void
-gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubr32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
tcg_gen_andi_i32(temp2, r1, 0xffff0000);
tcg_gen_shli_i32(temp, r1, 16);
gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
}
-static void
-gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
- uint32_t n, uint32_t mode)
+static void gen_msubr64s_h(TCGv_i32 ret,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1691,36 +1713,37 @@ gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
gen_helper_subr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
-static void
-gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubr32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
tcg_gen_andi_i32(temp2, r1, 0xffff0000);
tcg_gen_shli_i32(temp, r1, 16);
gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
-static void
-gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_msubr_q(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, uint32_t n)
{
- TCGv temp = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_constant_i32(n);
gen_helper_msubr_q(ret, tcg_env, r1, r2, r3, temp);
}
-static void
-gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+static void gen_msubrs_q(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, uint32_t n)
{
- TCGv temp = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_constant_i32(n);
gen_helper_msubr_q_ssov(ret, tcg_env, r1, r2, r3, temp);
}
-static void
-gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_msub32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1757,11 +1780,12 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
tcg_gen_mov_i32(ret, temp3);
}
-static void
-gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16sub32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
@@ -1774,11 +1798,12 @@ gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_sub_d(ret, arg1, temp);
}
-static void
-gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+static void gen_m16subs32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (n == 0) {
tcg_gen_mul_i32(temp, arg2, arg3);
} else { /* n is expected to be 1 */
@@ -1791,12 +1816,12 @@ gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
gen_subs(ret, arg1, temp);
}
-static void
-gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16sub64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1818,12 +1843,12 @@ gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t3);
}
-static void
-gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_m16subs64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1844,15 +1869,15 @@ gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_extr_i64_i32(rl, rh, t1);
}
-static void
-gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_msub64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t4 = tcg_temp_new_i64();
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
tcg_gen_ext_i32_i64(t2, arg2);
@@ -1872,8 +1897,8 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
(0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
if (n == 1) {
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000);
tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3);
tcg_gen_and_i32(temp, temp, temp2);
@@ -1892,9 +1917,9 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void
-gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
- uint32_t up_shift)
+static void gen_msubs32_q(TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3,
+ uint32_t n, uint32_t up_shift)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -1915,25 +1940,26 @@ gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
gen_helper_msub32_q_sub_ssov(ret, tcg_env, t1, t3);
}
-static void
-gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
- TCGv arg3, uint32_t n)
+static void gen_msubs64_q(TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 arg1_low, TCGv_i32 arg1_high,
+ TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
gen_helper_msub64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
-static void
-gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubad_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -1954,11 +1980,12 @@ gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_add_tl, tcg_gen_sub_tl);
}
-static void
-gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadm_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
@@ -1987,12 +2014,13 @@ gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
-static void
-gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadr32_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -2013,14 +2041,15 @@ gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_helper_subadr_h(ret, tcg_env, temp64, temp, temp2);
}
-static void
-gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubads_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
- TCGv temp3 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
+ TCGv_i32 temp3 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
@@ -2048,11 +2077,12 @@ gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
-static void
-gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
- TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadms_h(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1_low, TCGv_i32 r1_high,
+ TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
+ TCGv_i32 t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
@@ -2080,12 +2110,13 @@ gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static void
-gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+static void gen_msubadr32s_h(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3,
+ uint32_t n, uint32_t mode)
{
- TCGv t_n = tcg_constant_i32(n);
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 t_n = tcg_constant_i32(n);
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
@@ -2106,7 +2137,7 @@ gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
gen_helper_subadr_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
-static void gen_abs(TCGv ret, TCGv r1)
+static void gen_abs(TCGv_i32 ret, TCGv_i32 r1)
{
tcg_gen_abs_i32(ret, r1);
/* overflow can only happen, if r1 = 0x80000000 */
@@ -2121,10 +2152,10 @@ static void gen_abs(TCGv ret, TCGv r1)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
+static void gen_absdif(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new_i32();
- TCGv result = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 result = tcg_temp_new_i32();
tcg_gen_sub_i32(result, r1, r2);
tcg_gen_sub_i32(temp, r2, r1);
@@ -2147,22 +2178,22 @@ static void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_mov_i32(ret, result);
}
-static void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
+static void gen_absdifi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_absdif(ret, r1, temp);
}
-static void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
+static void gen_absdifsi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_absdif_ssov(ret, tcg_env, r1, temp);
}
-static void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
+static void gen_mul_i32s(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv high = tcg_temp_new();
- TCGv low = tcg_temp_new();
+ TCGv_i32 high = tcg_temp_new_i32();
+ TCGv_i32 low = tcg_temp_new_i32();
tcg_gen_muls2_i32(low, high, r1, r2);
tcg_gen_mov_i32(ret, low);
@@ -2179,13 +2210,14 @@ static void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
+static void gen_muli_i32s(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_mul_i32s(ret, r1, temp);
}
-static void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+static void gen_mul_i64s(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2)
{
tcg_gen_muls2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
@@ -2199,14 +2231,15 @@ static void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
- int32_t con)
+static void gen_muli_i64s(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_mul_i64s(ret_low, ret_high, r1, temp);
}
-static void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+static void gen_mul_i64u(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2)
{
tcg_gen_mulu2_i32(ret_low, ret_high, r1, r2);
/* clear V bit */
@@ -2220,40 +2253,40 @@ static void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
- int32_t con)
+static void gen_muli_i64u(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_mul_i64u(ret_low, ret_high, r1, temp);
}
-static void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
+static void gen_mulsi_i32(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_mul_ssov(ret, tcg_env, r1, temp);
}
-static void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
+static void gen_mulsui_i32(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_mul_suov(ret, tcg_env, r1, temp);
}
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
-static void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddsi_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_madd32_ssov(ret, tcg_env, r1, r2, temp);
}
-static void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_maddsui_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_madd32_suov(ret, tcg_env, r1, r2, temp);
}
-static void
-gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
+static void gen_mul_q(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2,
+ uint32_t n, uint32_t up_shift)
{
TCGv_i64 temp_64 = tcg_temp_new_i64();
TCGv_i64 temp2_64 = tcg_temp_new_i64();
@@ -2309,10 +2342,9 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void
-gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+static void gen_mul_q_16(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, uint32_t n)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
if (n == 0) {
tcg_gen_mul_i32(ret, arg1, arg2);
} else { /* n is expected to be 1 */
@@ -2331,9 +2363,9 @@ gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+static void gen_mulr_q(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, uint32_t n)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
if (n == 0) {
tcg_gen_mul_i32(ret, arg1, arg2);
tcg_gen_addi_i32(ret, ret, 0x8000);
@@ -2357,9 +2389,9 @@ static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
tcg_gen_andi_i32(ret, ret, 0xffff0000);
}
-static void
-gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_madds_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2367,17 +2399,17 @@ gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static void
-gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddsi_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void
-gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_maddsu_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2385,29 +2417,29 @@ gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static void
-gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_maddsui_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low,
+ TCGv_i32 r2_high, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubsi_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_msub32_ssov(ret, tcg_env, r1, r2, temp);
}
-static void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+static void gen_msubsui_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_msub32_suov(ret, tcg_env, r1, r2, temp);
}
-static void
-gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msubs_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2415,17 +2447,17 @@ gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static void
-gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubsi_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void
-gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- TCGv r3)
+static void gen_msubsu_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ TCGv_i32 r3)
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
@@ -2433,26 +2465,26 @@ gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
-static void
-gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
- int32_t con)
+static void gen_msubsui_64(TCGv_i32 ret_low, TCGv_i32 ret_high,
+ TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high,
+ int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
-static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
+static void gen_saturate(TCGv_i32 ret, TCGv_i32 arg, int32_t up, int32_t low)
{
tcg_gen_smax_i32(ret, arg, tcg_constant_i32(low));
tcg_gen_smin_i32(ret, ret, tcg_constant_i32(up));
}
-static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
+static void gen_saturate_u(TCGv_i32 ret, TCGv_i32 arg, int32_t up)
{
tcg_gen_umin_i32(ret, arg, tcg_constant_i32(up));
}
-static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
+static void gen_shi(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count)
{
if (shift_count == -32) {
tcg_gen_movi_i32(ret, 0);
@@ -2463,15 +2495,15 @@ static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
}
}
-static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
+static void gen_sh_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t shiftcount)
{
- TCGv temp_low, temp_high;
+ TCGv_i32 temp_low, temp_high;
if (shiftcount == -16) {
tcg_gen_movi_i32(ret, 0);
} else {
- temp_high = tcg_temp_new();
- temp_low = tcg_temp_new();
+ temp_high = tcg_temp_new_i32();
+ temp_low = tcg_temp_new_i32();
tcg_gen_andi_i32(temp_low, r1, 0xffff);
tcg_gen_andi_i32(temp_high, r1, 0xffff0000);
@@ -2481,11 +2513,11 @@ static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
}
}
-static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
+static void gen_shaci(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count)
{
uint32_t msk, msk_start;
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
if (shift_count == 0) {
/* Clear PSW.C and PSW.V */
@@ -2500,8 +2532,8 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
/* clear PSW.V */
tcg_gen_movi_i32(cpu_PSW_V, 0);
} else if (shift_count > 0) {
- TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
- TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);
+ TCGv_i32 t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
+ TCGv_i32 t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);
/* calc carry */
msk_start = 32 - shift_count;
@@ -2532,34 +2564,34 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
-static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
+static void gen_shas(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_sha_ssov(ret, tcg_env, r1, r2);
}
-static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
+static void gen_shasi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_shas(ret, r1, temp);
}
-static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
+static void gen_sha_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count)
{
- TCGv low, high;
+ TCGv_i32 low, high;
if (shift_count == 0) {
tcg_gen_mov_i32(ret, r1);
} else if (shift_count > 0) {
- low = tcg_temp_new();
- high = tcg_temp_new();
+ low = tcg_temp_new_i32();
+ high = tcg_temp_new_i32();
tcg_gen_andi_i32(high, r1, 0xffff0000);
tcg_gen_shli_i32(low, r1, shift_count);
tcg_gen_shli_i32(ret, high, shift_count);
tcg_gen_deposit_i32(ret, ret, low, 0, 16);
} else {
- low = tcg_temp_new();
- high = tcg_temp_new();
+ low = tcg_temp_new_i32();
+ high = tcg_temp_new_i32();
tcg_gen_ext16s_i32(low, r1);
tcg_gen_sari_i32(low, low, -shift_count);
@@ -2569,58 +2601,58 @@ static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
}
/* ret = {ret[30:0], (r1 cond r2)}; */
-static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
+static void gen_sh_cond(int cond, TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
tcg_gen_shli_i32(temp, ret, 1);
tcg_gen_setcond_i32(cond, temp2, r1, r2);
tcg_gen_or_i32(ret, temp, temp2);
}
-static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
+static void gen_sh_condi(int cond, TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_sh_cond(cond, ret, r1, temp);
}
-static void gen_adds(TCGv ret, TCGv r1, TCGv r2)
+static void gen_adds(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_add_ssov(ret, tcg_env, r1, r2);
}
-static void gen_addsi(TCGv ret, TCGv r1, int32_t con)
+static void gen_addsi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_add_ssov(ret, tcg_env, r1, temp);
}
-static void gen_addsui(TCGv ret, TCGv r1, int32_t con)
+static void gen_addsui(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_helper_add_suov(ret, tcg_env, r1, temp);
}
-static void gen_subs(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subs(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_sub_ssov(ret, tcg_env, r1, r2);
}
-static void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
+static void gen_subsu(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2)
{
gen_helper_sub_suov(ret, tcg_env, r1, r2);
}
-static void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
+static void gen_bit_2op(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2,
int pos1, int pos2,
- void(*op1)(TCGv, TCGv, TCGv),
- void(*op2)(TCGv, TCGv, TCGv))
+ void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32),
+ void(*op2)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp1, temp2;
+ TCGv_i32 temp1, temp2;
- temp1 = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp1 = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(temp2, r2, pos2);
tcg_gen_shri_i32(temp1, r1, pos1);
@@ -2632,14 +2664,14 @@ static void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
}
/* ret = r1[pos1] op1 r2[pos2]; */
-static void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
+static void gen_bit_1op(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2,
int pos1, int pos2,
- void(*op1)(TCGv, TCGv, TCGv))
+ void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp1, temp2;
+ TCGv_i32 temp1, temp2;
- temp1 = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp1 = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(temp2, r2, pos2);
tcg_gen_shri_i32(temp1, r1, pos1);
@@ -2649,11 +2681,12 @@ static void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
tcg_gen_andi_i32(ret, ret, 0x1);
}
-static void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
- void(*op)(TCGv, TCGv, TCGv))
+static void gen_accumulating_cond(int cond, TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2,
+ void(*op)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
/* temp = (arg1 cond arg2 )*/
tcg_gen_setcond_i32(cond, temp, r1, r2);
/* temp2 = ret[0]*/
@@ -2664,20 +2697,20 @@ static void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
tcg_gen_deposit_i32(ret, ret, temp, 0, 1);
}
-static void
-gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
- void(*op)(TCGv, TCGv, TCGv))
+static void gen_accumulating_condi(int cond, TCGv_i32 ret, TCGv_i32 r1,
+ int32_t con,
+ void(*op)(TCGv_i32, TCGv_i32, TCGv_i32))
{
- TCGv temp = tcg_constant_i32(con);
+ TCGv_i32 temp = tcg_constant_i32(con);
gen_accumulating_cond(cond, ret, r1, temp, op);
}
-static void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
+static void gen_eqany_bi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv b0 = tcg_temp_new();
- TCGv b1 = tcg_temp_new();
- TCGv b2 = tcg_temp_new();
- TCGv b3 = tcg_temp_new();
+ TCGv_i32 b0 = tcg_temp_new_i32();
+ TCGv_i32 b1 = tcg_temp_new_i32();
+ TCGv_i32 b2 = tcg_temp_new_i32();
+ TCGv_i32 b3 = tcg_temp_new_i32();
/* byte 0 */
tcg_gen_andi_i32(b0, r1, 0xff);
@@ -2701,10 +2734,10 @@ static void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
tcg_gen_or_i32(ret, ret, b3);
}
-static void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
+static void gen_eqany_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t con)
{
- TCGv h0 = tcg_temp_new();
- TCGv h1 = tcg_temp_new();
+ TCGv_i32 h0 = tcg_temp_new_i32();
+ TCGv_i32 h1 = tcg_temp_new_i32();
/* halfword 0 */
tcg_gen_andi_i32(h0, r1, 0xffff);
@@ -2720,11 +2753,12 @@ static void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
/* mask = ((1 << width) -1) << pos;
ret = (r1 & ~mask) | (r2 << pos) & mask); */
-static void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
+static void gen_insert(TCGv_i32 ret,
+ TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 width, TCGv_i32 pos)
{
- TCGv mask = tcg_temp_new();
- TCGv temp = tcg_temp_new();
- TCGv temp2 = tcg_temp_new();
+ TCGv_i32 mask = tcg_temp_new_i32();
+ TCGv_i32 temp = tcg_temp_new_i32();
+ TCGv_i32 temp2 = tcg_temp_new_i32();
tcg_gen_shl_i32(mask, tcg_constant_i32(1), width);
tcg_gen_subi_i32(mask, mask, 1);
@@ -2736,7 +2770,7 @@ static void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
tcg_gen_or_i32(ret, temp, temp2);
}
-static void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
+static void gen_bsplit(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 r1)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -2744,7 +2778,7 @@ static void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
+static void gen_unpack(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 r1)
{
TCGv_i64 temp = tcg_temp_new_i64();
@@ -2752,8 +2786,9 @@ static void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
tcg_gen_extr_i64_i32(rl, rh, temp);
}
-static void
-gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+static void gen_dvinit_b(DisasContext *ctx,
+ TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 r1, TCGv_i32 r2)
{
TCGv_i64 ret = tcg_temp_new_i64();
@@ -2765,8 +2800,9 @@ gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
tcg_gen_extr_i64_i32(rl, rh, ret);
}
-static void
-gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+static void gen_dvinit_h(DisasContext *ctx,
+ TCGv_i32 rl, TCGv_i32 rh,
+ TCGv_i32 r1, TCGv_i32 r2)
{
TCGv_i64 ret = tcg_temp_new_i64();
@@ -2778,9 +2814,9 @@ gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
tcg_gen_extr_i64_i32(rl, rh, ret);
}
-static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
+static void gen_calc_usb_mul_h(TCGv_i32 arg_low, TCGv_i32 arg_high)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
/* calc AV bit */
tcg_gen_add_i32(temp, arg_low, arg_low);
tcg_gen_xor_i32(temp, temp, arg_low);
@@ -2792,9 +2828,9 @@ static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
tcg_gen_movi_i32(cpu_PSW_V, 0);
}
-static void gen_calc_usb_mulr_h(TCGv arg)
+static void gen_calc_usb_mulr_h(TCGv_i32 arg)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
/* calc AV bit */
tcg_gen_add_i32(temp, arg, arg);
tcg_gen_xor_i32(temp, temp, arg);
@@ -2836,8 +2872,8 @@ static void generate_trap(DisasContext *ctx, int class, int tin)
ctx->base.is_jmp = DISAS_NORETURN;
}
-static void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
- TCGv r2, int16_t address)
+static void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv_i32 r1,
+ TCGv_i32 r2, int16_t address)
{
TCGLabel *jumpLabel = gen_new_label();
tcg_gen_brcond_i32(cond, r1, r2, jumpLabel);
@@ -2848,10 +2884,10 @@ static void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2);
}
-static void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
+static void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv_i32 r1,
int r2, int16_t address)
{
- TCGv temp = tcg_constant_i32(r2);
+ TCGv_i32 temp = tcg_constant_i32(r2);
gen_branch_cond(ctx, cond, r1, temp, address);
}
@@ -2868,7 +2904,7 @@ static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
static void gen_fcall_save_ctx(DisasContext *ctx)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, cpu_gpr_a[10], -4);
tcg_gen_qemu_st_i32(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
@@ -2878,7 +2914,7 @@ static void gen_fcall_save_ctx(DisasContext *ctx)
static void gen_fret(DisasContext *ctx)
{
- TCGv temp = tcg_temp_new();
+ TCGv_i32 temp = tcg_temp_new_i32();
tcg_gen_andi_i32(temp, cpu_gpr_a[11], ~0x1);
tcg_gen_qemu_ld_i32(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
@@ -2890,7 +2926,7 @@ static void gen_fret(DisasContext *ctx)
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
int r2 , int32_t constant , int32_t offset)
{
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
int n;
switch (opc) {
@@ -2927,12 +2963,12 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
break;
/* SBRN-format jumps */
case OPC1_16_SBRN_JZ_T:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant);
gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
break;
case OPC1_16_SBRN_JNZ_T:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant);
gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
break;
@@ -3040,7 +3076,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
}
break;
case OPCM_32_BRC_JNE:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* subi is unconditional */
@@ -3057,7 +3093,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
case OPCM_32_BRN_JTT:
n = MASK_OP_BRN_N(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_andi_i32(temp, cpu_gpr_d[r1], (1 << n));
if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
@@ -3112,8 +3148,8 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
}
break;
case OPCM_32_BRR_JNE:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
/* also save r2, in case of r1 == r2, so r2 is not decremented */
@@ -3151,7 +3187,7 @@ static void decode_src_opc(DisasContext *ctx, int op1)
{
int r1;
int32_t const4;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
r1 = MASK_OP_SRC_S1D(ctx->opcode);
const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);
@@ -3227,7 +3263,7 @@ static void decode_src_opc(DisasContext *ctx, int op1)
static void decode_srr_opc(DisasContext *ctx, int op1)
{
int r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_SRR_S1D(ctx->opcode);
r2 = MASK_OP_SRR_S2(ctx->opcode);
@@ -3545,7 +3581,7 @@ static void decode_16Bit_opc(DisasContext *ctx)
int r1, r2;
int32_t const16;
int32_t address;
- TCGv temp;
+ TCGv_i32 temp;
op1 = MASK_OP_MAJOR(ctx->opcode);
@@ -3612,7 +3648,7 @@ static void decode_16Bit_opc(DisasContext *ctx)
r2 = MASK_OP_SRRS_S2(ctx->opcode);
r1 = MASK_OP_SRRS_S1D(ctx->opcode);
const16 = MASK_OP_SRRS_N(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_shli_i32(temp, cpu_gpr_d[15], const16);
tcg_gen_add_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
break;
@@ -3780,7 +3816,7 @@ static void decode_abs_ldw(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3813,7 +3849,7 @@ static void decode_abs_ldb(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3844,7 +3880,7 @@ static void decode_abs_ldst_swap(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3895,7 +3931,7 @@ static void decode_abs_store(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -3928,7 +3964,7 @@ static void decode_abs_storeb_h(DisasContext *ctx)
int32_t op2;
int32_t r1;
uint32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_ABS_S1D(ctx->opcode);
address = MASK_OP_ABS_OFF18(ctx->opcode);
@@ -4030,7 +4066,7 @@ static void decode_bit_insert(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int pos1, pos2;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_BIT_OP2(ctx->opcode);
r1 = MASK_OP_BIT_S1(ctx->opcode);
r2 = MASK_OP_BIT_S2(ctx->opcode);
@@ -4038,7 +4074,7 @@ static void decode_bit_insert(DisasContext *ctx)
pos1 = MASK_OP_BIT_POS1(ctx->opcode);
pos2 = MASK_OP_BIT_POS2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_shri_i32(temp, cpu_gpr_d[r2], pos2);
if (op2 == OPC2_32_BIT_INSN_T) {
@@ -4129,7 +4165,7 @@ static void decode_bit_sh_logic1(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int pos1, pos2;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_BIT_OP2(ctx->opcode);
r1 = MASK_OP_BIT_S1(ctx->opcode);
@@ -4138,7 +4174,7 @@ static void decode_bit_sh_logic1(DisasContext *ctx)
pos1 = MASK_OP_BIT_POS1(ctx->opcode);
pos2 = MASK_OP_BIT_POS2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_BIT_SH_AND_T:
@@ -4169,7 +4205,7 @@ static void decode_bit_sh_logic2(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int pos1, pos2;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_BIT_OP2(ctx->opcode);
r1 = MASK_OP_BIT_S1(ctx->opcode);
@@ -4178,7 +4214,7 @@ static void decode_bit_sh_logic2(DisasContext *ctx)
pos1 = MASK_OP_BIT_POS1(ctx->opcode);
pos2 = MASK_OP_BIT_POS2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_BIT_SH_NAND_T:
@@ -4212,7 +4248,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4296,7 +4332,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
break;
case OPC2_32_BO_ST_D_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
@@ -4314,7 +4350,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
break;
case OPC2_32_BO_ST_DA_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
@@ -4331,19 +4367,19 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_Q_SHORTOFF:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
break;
case OPC2_32_BO_ST_Q_POSTINC:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
tcg_gen_qemu_st_i32(temp, cpu_gpr_a[r2], ctx->mem_idx,
MO_LEUW);
tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_ST_Q_PREINC:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16);
gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
break;
@@ -4368,15 +4404,15 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp, temp2, t_off10;
+ TCGv_i32 temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
@@ -4475,7 +4511,7 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4529,7 +4565,7 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
break;
case OPC2_32_BO_LD_D_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
@@ -4547,7 +4583,7 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
break;
case OPC2_32_BO_LD_DA_PREINC:
CHECK_REG_PAIR(r1);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10);
gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp);
tcg_gen_mov_i32(cpu_gpr_a[r2], temp);
@@ -4609,15 +4645,15 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int r1, r2;
- TCGv temp, temp2, t_off10;
+ TCGv_i32 temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
@@ -4724,7 +4760,7 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx)
uint32_t off10;
int r1, r2;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4732,7 +4768,7 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx)
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_BO_LDLCX_SHORTOFF:
@@ -4812,15 +4848,15 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx)
uint32_t op2;
uint32_t off10;
int r1, r2;
- TCGv temp, temp2, t_off10;
+ TCGv_i32 temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
op2 = MASK_OP_BO_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]);
@@ -4868,7 +4904,7 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1)
{
int r1, r2;
int32_t address;
- TCGv temp;
+ TCGv_i32 temp;
r1 = MASK_OP_BOL_S1D(ctx->opcode);
r2 = MASK_OP_BOL_S2(ctx->opcode);
@@ -4876,12 +4912,12 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1)
switch (op1) {
case OPC1_32_BOL_LD_A_LONGOFF:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address);
tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
break;
case OPC1_32_BOL_LD_W_LONGOFF:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address);
tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
break;
@@ -4951,7 +4987,7 @@ static void decode_rc_logical_shift(DisasContext *ctx)
uint32_t op2;
int r1, r2;
int32_t const9;
- TCGv temp;
+ TCGv_i32 temp;
r2 = MASK_OP_RC_D(ctx->opcode);
r1 = MASK_OP_RC_S1(ctx->opcode);
@@ -4966,12 +5002,12 @@ static void decode_rc_logical_shift(DisasContext *ctx)
tcg_gen_andi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
break;
case OPC2_32_RC_NAND:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_movi_i32(temp, const9);
tcg_gen_nand_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_NOR:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_movi_i32(temp, const9);
tcg_gen_nor_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
@@ -5026,7 +5062,7 @@ static void decode_rc_accumulator(DisasContext *ctx)
int r1, r2;
int16_t const9;
- TCGv temp;
+ TCGv_i32 temp;
r2 = MASK_OP_RC_D(ctx->opcode);
r1 = MASK_OP_RC_S1(ctx->opcode);
@@ -5034,7 +5070,7 @@ static void decode_rc_accumulator(DisasContext *ctx)
op2 = MASK_OP_RC_OP2(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RC_ABSDIF:
@@ -5287,7 +5323,7 @@ static void decode_rcpw_insert(DisasContext *ctx)
int r1, r2;
int32_t pos, width, const4;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RCPW_OP2(ctx->opcode);
r1 = MASK_OP_RCPW_S1(ctx->opcode);
@@ -5328,7 +5364,7 @@ static void decode_rcrw_insert(DisasContext *ctx)
int r1, r3, r4;
int32_t width, const4;
- TCGv temp, temp2, temp3;
+ TCGv_i32 temp, temp2, temp3;
op2 = MASK_OP_RCRW_OP2(ctx->opcode);
r1 = MASK_OP_RCRW_S1(ctx->opcode);
@@ -5337,8 +5373,8 @@ static void decode_rcrw_insert(DisasContext *ctx)
width = MASK_OP_RCRW_WIDTH(ctx->opcode);
const4 = MASK_OP_RCRW_CONST4(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RCRW_IMASK:
@@ -5350,7 +5386,7 @@ static void decode_rcrw_insert(DisasContext *ctx)
tcg_gen_shl_i32(cpu_gpr_d[r4], temp2, temp);
break;
case OPC2_32_RCRW_INSERT:
- temp3 = tcg_temp_new();
+ temp3 = tcg_temp_new_i32();
tcg_gen_movi_i32(temp, width);
tcg_gen_movi_i32(temp2, const4);
@@ -5370,7 +5406,7 @@ static void decode_rcr_cond_select(DisasContext *ctx)
int r1, r3, r4;
int32_t const9;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RCR_OP2(ctx->opcode);
r1 = MASK_OP_RCR_S1(ctx->opcode);
@@ -5577,7 +5613,7 @@ static void decode_rr_accumulator(DisasContext *ctx)
uint32_t op2;
int r3, r2, r1;
- TCGv temp;
+ TCGv_i32 temp;
r3 = MASK_OP_RR_D(ctx->opcode);
r2 = MASK_OP_RR_S2(ctx->opcode);
@@ -5774,7 +5810,7 @@ static void decode_rr_accumulator(DisasContext *ctx)
break;
case OPC2_32_RR_MOV_64:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
tcg_gen_mov_i32(temp, cpu_gpr_d[r1]);
@@ -5993,7 +6029,7 @@ static void decode_rr_address(DisasContext *ctx)
{
uint32_t op2, n;
int r1, r2, r3;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RR_OP2(ctx->opcode);
r3 = MASK_OP_RR_D(ctx->opcode);
@@ -6006,12 +6042,12 @@ static void decode_rr_address(DisasContext *ctx)
tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
break;
case OPC2_32_RR_ADDSC_A:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_shli_i32(temp, cpu_gpr_d[r1], n);
tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
break;
case OPC2_32_RR_ADDSC_AT:
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 3);
tcg_gen_add_i32(temp, cpu_gpr_a[r2], temp);
tcg_gen_andi_i32(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
@@ -6091,7 +6127,7 @@ static void decode_rr_divide(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
- TCGv temp, temp2, temp3;
+ TCGv_i32 temp, temp2, temp3;
op2 = MASK_OP_RR_OP2(ctx->opcode);
r3 = MASK_OP_RR_D(ctx->opcode);
@@ -6112,9 +6148,9 @@ static void decode_rr_divide(DisasContext *ctx)
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_DVINIT_BU:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
- temp3 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
+ temp3 = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 8);
/* reset av */
@@ -6141,9 +6177,9 @@ static void decode_rr_divide(DisasContext *ctx)
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_DVINIT_HU:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
- temp3 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
+ temp3 = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 16);
/* reset av */
@@ -6165,8 +6201,8 @@ static void decode_rr_divide(DisasContext *ctx)
tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp3);
break;
case OPC2_32_RR_DVINIT:
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(r3);
/* overflow = ((D[b] == 0) ||
((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
@@ -6315,7 +6351,7 @@ static void decode_rr1_mul(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
- TCGv n;
+ TCGv_i32 n;
TCGv_i64 temp64;
r1 = MASK_OP_RR1_S1(ctx->opcode);
@@ -6420,7 +6456,7 @@ static void decode_rr1_mulq(DisasContext *ctx)
int r1, r2, r3;
uint32_t n;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
r1 = MASK_OP_RR1_S1(ctx->opcode);
r2 = MASK_OP_RR1_S2(ctx->opcode);
@@ -6428,8 +6464,8 @@ static void decode_rr1_mulq(DisasContext *ctx)
n = MASK_OP_RR1_N(ctx->opcode);
op2 = MASK_OP_RR1_OP2(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RR1_MUL_Q_32:
@@ -6526,7 +6562,7 @@ static void decode_rrpw_extract_insert(DisasContext *ctx)
uint32_t op2;
int r1, r2, r3;
int32_t pos, width;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RRPW_OP2(ctx->opcode);
r1 = MASK_OP_RRPW_S1(ctx->opcode);
@@ -6554,7 +6590,7 @@ static void decode_rrpw_extract_insert(DisasContext *ctx)
CHECK_REG_PAIR(r3);
if (pos + width <= 32) {
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
tcg_gen_movi_i32(temp, ((1u << width) - 1) << pos);
tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp);
@@ -6580,7 +6616,7 @@ static void decode_rrr_cond_select(DisasContext *ctx)
{
uint32_t op2;
int r1, r2, r3, r4;
- TCGv temp;
+ TCGv_i32 temp;
op2 = MASK_OP_RRR_OP2(ctx->opcode);
r1 = MASK_OP_RRR_S1(ctx->opcode);
@@ -6964,7 +7000,7 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
{
uint32_t op2;
uint32_t r1, r2, r3, r4, n;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RRR1_OP2(ctx->opcode);
r1 = MASK_OP_RRR1_S1(ctx->opcode);
@@ -6973,8 +7009,8 @@ static void decode_rrr1_maddq_h(DisasContext *ctx)
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRR1_MADD_Q_32:
@@ -7446,7 +7482,7 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
{
uint32_t op2;
uint32_t r1, r2, r3, r4, n;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RRR1_OP2(ctx->opcode);
r1 = MASK_OP_RRR1_S1(ctx->opcode);
@@ -7455,8 +7491,8 @@ static void decode_rrr1_msubq_h(DisasContext *ctx)
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_temp_new();
- temp2 = tcg_temp_new();
+ temp = tcg_temp_new_i32();
+ temp2 = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRR1_MSUB_Q_32:
@@ -7783,7 +7819,7 @@ static void decode_rrrr_extract_insert(DisasContext *ctx)
{
uint32_t op2;
int r1, r2, r3, r4;
- TCGv tmp_width, tmp_pos;
+ TCGv_i32 tmp_width, tmp_pos;
r1 = MASK_OP_RRRR_S1(ctx->opcode);
r2 = MASK_OP_RRRR_S2(ctx->opcode);
@@ -7791,8 +7827,8 @@ static void decode_rrrr_extract_insert(DisasContext *ctx)
r4 = MASK_OP_RRRR_D(ctx->opcode);
op2 = MASK_OP_RRRR_OP2(ctx->opcode);
- tmp_pos = tcg_temp_new();
- tmp_width = tcg_temp_new();
+ tmp_pos = tcg_temp_new_i32();
+ tmp_width = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRRR_DEXTR:
@@ -7800,8 +7836,8 @@ static void decode_rrrr_extract_insert(DisasContext *ctx)
if (r1 == r2) {
tcg_gen_rotl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
} else {
- TCGv msw = tcg_temp_new();
- TCGv zero = tcg_constant_i32(0);
+ TCGv_i32 msw = tcg_temp_new_i32();
+ TCGv_i32 zero = tcg_constant_i32(0);
tcg_gen_shl_i32(tmp_width, cpu_gpr_d[r1], tmp_pos);
tcg_gen_subfi_i32(msw, 32, tmp_pos);
tcg_gen_shr_i32(msw, cpu_gpr_d[r2], msw);
@@ -7848,7 +7884,7 @@ static void decode_rrrw_extract_insert(DisasContext *ctx)
int r1, r2, r3, r4;
int32_t width;
- TCGv temp, temp2;
+ TCGv_i32 temp, temp2;
op2 = MASK_OP_RRRW_OP2(ctx->opcode);
r1 = MASK_OP_RRRW_S1(ctx->opcode);
@@ -7857,7 +7893,7 @@ static void decode_rrrw_extract_insert(DisasContext *ctx)
r4 = MASK_OP_RRRW_D(ctx->opcode);
width = MASK_OP_RRRW_WIDTH(ctx->opcode);
- temp = tcg_temp_new();
+ temp = tcg_temp_new_i32();
switch (op2) {
case OPC2_32_RRRW_EXTR:
@@ -7877,7 +7913,7 @@ static void decode_rrrw_extract_insert(DisasContext *ctx)
}
break;
case OPC2_32_RRRW_IMASK:
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
CHECK_REG_PAIR(r4);
tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f);
tcg_gen_movi_i32(temp2, (1 << width) - 1);
@@ -7886,7 +7922,7 @@ static void decode_rrrw_extract_insert(DisasContext *ctx)
tcg_gen_mov_i32(cpu_gpr_d[r4 + 1], temp2);
break;
case OPC2_32_RRRW_INSERT:
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(temp, width);
tcg_gen_andi_i32(temp2, cpu_gpr_d[r3], 0x1f);
@@ -7903,7 +7939,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
uint32_t op2;
uint32_t r1;
TCGLabel *l1;
- TCGv tmp;
+ TCGv_i32 tmp;
op2 = MASK_OP_SYS_OP2(ctx->opcode);
r1 = MASK_OP_SYS_S1D(ctx->opcode);
@@ -7957,7 +7993,7 @@ static void decode_sys_interrupts(DisasContext *ctx)
break;
case OPC2_32_SYS_RFM:
if (ctx->priv == TRICORE_PRIV_SM) {
- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i32();
l1 = gen_new_label();
tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
@@ -8013,7 +8049,7 @@ static void decode_32Bit_opc(DisasContext *ctx)
int32_t address, const16;
int8_t b, const4;
int32_t bpos;
- TCGv temp, temp2, temp3;
+ TCGv_i32 temp, temp2, temp3;
op1 = MASK_OP_MAJOR(ctx->opcode);
@@ -8046,7 +8082,7 @@ static void decode_32Bit_opc(DisasContext *ctx)
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(temp2, cpu_gpr_d[r1], 16);
tcg_gen_qemu_st_i32(temp2, temp, ctx->mem_idx, MO_LEUW);
@@ -8081,7 +8117,7 @@ static void decode_32Bit_opc(DisasContext *ctx)
bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
temp = tcg_constant_i32(EA_ABS_FORMAT(address));
- temp2 = tcg_temp_new();
+ temp2 = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(temp2, temp, ctx->mem_idx, MO_UB);
tcg_gen_andi_i32(temp2, temp2, ~(0x1u << bpos));
@@ -8208,8 +8244,8 @@ static void decode_32Bit_opc(DisasContext *ctx)
r3 = MASK_OP_RCRR_D(ctx->opcode);
const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
temp = tcg_constant_i32(const16);
- temp2 = tcg_temp_new(); /* width*/
- temp3 = tcg_temp_new(); /* pos */
+ temp2 = tcg_temp_new_i32(); /* width*/
+ temp3 = tcg_temp_new_i32(); /* pos */
CHECK_REG_PAIR(r2);
--
2.51.0
* [PULL 71/75] target/xtensa: Replace legacy cpu_physical_memory_[un]map() calls
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (69 preceding siblings ...)
2025-10-16 12:15 ` [PULL 70/75] target/tricore: Expand TCGv type for 32-bit target Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 72/75] target/xtensa: Remove target_ulong use in xtensa_tr_translate_insn() Philippe Mathieu-Daudé
` (3 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Commit b7ecba0f6f6 ("docs/devel/loads-stores.rst: Document our
various load and store APIs") noted that the cpu_physical_memory_*()
methods are legacy, with address_space_*() as their replacement.
Replace the *_map() / *_unmap() calls in the SIMCALL helper,
using the vCPU's default address space. No behavioral change expected.
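For reference, the conversion follows a pattern roughly like this
(a minimal sketch only; the hunks below are the authoritative change,
and 'cs' stands for the CPUState of the vCPU doing the access):

    AddressSpace *as = cs->as;                  /* vCPU default address space */
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;

    /* legacy:  buf = cpu_physical_memory_map(paddr, &sz, !is_write); */
    void *buf = address_space_map(as, paddr, &sz, !is_write, attrs);
    ...
    /* legacy:  cpu_physical_memory_unmap(buf, sz, !is_write, done); */
    address_space_unmap(as, buf, sz, !is_write, done);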
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20251002145742.75624-6-philmd@linaro.org>
Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
---
target/xtensa/xtensa-semi.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
index 636f421da2b..431c263dc57 100644
--- a/target/xtensa/xtensa-semi.c
+++ b/target/xtensa/xtensa-semi.c
@@ -32,6 +32,7 @@
#include "exec/target_page.h"
#include "semihosting/semihost.h"
#include "semihosting/uaccess.h"
+#include "system/memory.h"
#include "qapi/error.h"
#include "qemu/log.h"
@@ -192,7 +193,9 @@ void xtensa_sim_open_console(Chardev *chr)
void HELPER(simcall)(CPUXtensaState *env)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
CPUState *cs = env_cpu(env);
+ AddressSpace *as = cs->as;
uint32_t *regs = env->regs;
switch (regs[2]) {
@@ -215,7 +218,7 @@ void HELPER(simcall)(CPUXtensaState *env)
TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1));
uint32_t io_sz = page_left < len ? page_left : len;
hwaddr sz = io_sz;
- void *buf = cpu_physical_memory_map(paddr, &sz, !is_write);
+ void *buf = address_space_map(as, paddr, &sz, !is_write, attrs);
uint32_t io_done;
bool error = false;
@@ -261,7 +264,7 @@ void HELPER(simcall)(CPUXtensaState *env)
error = true;
io_done = 0;
}
- cpu_physical_memory_unmap(buf, sz, !is_write, io_done);
+ address_space_unmap(as, buf, sz, !is_write, io_done);
} else {
error = true;
regs[3] = TARGET_EINVAL;
@@ -408,11 +411,11 @@ void HELPER(simcall)(CPUXtensaState *env)
while (sz) {
hwaddr len = sz;
- void *buf = cpu_physical_memory_map(base, &len, 1);
+ void *buf = address_space_map(as, base, &len, true, attrs);
if (buf && len) {
memset(buf, regs[4], len);
- cpu_physical_memory_unmap(buf, len, 1, len);
+ address_space_unmap(as, buf, len, true, len);
} else {
len = 1;
}
--
2.51.0
* [PULL 72/75] target/xtensa: Remove target_ulong use in xtensa_tr_translate_insn()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (70 preceding siblings ...)
2025-10-16 12:15 ` [PULL 71/75] target/xtensa: Replace legacy cpu_physical_memory_[un]map() calls Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 73/75] target/xtensa: Remove target_ulong use in xtensa_get_tb_cpu_state() Philippe Mathieu-Daudé
` (2 subsequent siblings)
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Since commit 85c19af63e7 ("include/exec: Use vaddr in DisasContextBase
for virtual addresses"), the DisasContextBase::pc_first field has been
of type vaddr.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008051529.86378-2-philmd@linaro.org>
---
target/xtensa/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 34ae2f4e162..bb8d2ed86cf 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -1166,7 +1166,7 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUXtensaState *env = cpu_env(cpu);
- target_ulong page_start;
+ vaddr page_start;
/* These two conditions only apply to the first insn in the TB,
but this is the first TranslateOps hook that allows exiting. */
--
2.51.0
* [PULL 73/75] target/xtensa: Remove target_ulong use in xtensa_get_tb_cpu_state()
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (71 preceding siblings ...)
2025-10-16 12:15 ` [PULL 72/75] target/xtensa: Remove target_ulong use in xtensa_tr_translate_insn() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 74/75] linux-user/microblaze: Fix little-endianness binary Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 75/75] mailmap: Unify Clément Mathieu--Drif emails Philippe Mathieu-Daudé
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Since commit bb5de52524c ("target: Widen pc/cs_base in
cpu_get_tb_cpu_state"), cpu_get_tb_cpu_state() expects
a uint64_t type for cs_base.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20251008051529.86378-3-philmd@linaro.org>
---
target/xtensa/cpu.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index ea9b6df3aa2..1eeed44e336 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -59,13 +59,13 @@ static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs)
{
CPUXtensaState *env = cpu_env(cs);
uint32_t flags = 0;
- target_ulong cs_base = 0;
+ uint64_t cs_base = 0;
flags |= xtensa_get_ring(env);
if (env->sregs[PS] & PS_EXCM) {
flags |= XTENSA_TBFLAG_EXCM;
} else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
- target_ulong lend_dist =
+ uint64_t lend_dist =
env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
/*
@@ -83,7 +83,7 @@ static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs)
* for the TB that contains this instruction.
*/
if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
- target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
+ uint64_t lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
cs_base = lend_dist;
if (lbeg_off < 256) {
--
2.51.0
* [PULL 74/75] linux-user/microblaze: Fix little-endianness binary
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (72 preceding siblings ...)
2025-10-16 12:15 ` [PULL 73/75] target/xtensa: Remove target_ulong use in xtensa_get_tb_cpu_state() Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
2025-10-16 12:15 ` [PULL 75/75] mailmap: Unify Clément Mathieu--Drif emails Philippe Mathieu-Daudé
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
The MicroBlaze CPU model has a "little-endian" property, pointing to
the @endi internal field. Commit c36ec3a9655 ("hw/microblaze:
Explicit CPU endianness") gave all MicroBlaze boards an explicit
default endianness, so the later commit 415aae543ed
("target/microblaze: Consider endianness while translating code")
could infer the endianness at runtime from the @endi field rather
than at compile time via the TARGET_BIG_ENDIAN definition. In doing
so, we forgot to make the endianness explicit for user emulation,
where all CPUs are therefore started with the default
"little-endian=off" value, breaking support for little-endian
binaries:
$ readelf -h ./hello-world-mbel
ELF Header:
Magic: 7f 45 4c 46 01 01 01 00 00 00 00 00 00 00 00 00
Class: ELF32
Data: 2's complement, little endian
$ qemu-microblazeel ./hello-world-mbel
qemu: uncaught target signal 11 (Segmentation fault) - core dumped
Segmentation fault (core dumped)
Fix this by restoring the previous behavior of starting with the
binary's built-in endianness:
$ qemu-microblazeel ./hello-world-mbel
Hello World
Cc: qemu-stable@nongnu.org
Fixes: 415aae543ed ("target/microblaze: Consider endianness while translating code")
Reported-by: Edgar E. Iglesias <edgar.iglesias@gmail.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Message-Id: <20251006173350.17455-1-philmd@linaro.org>
---
linux-user/microblaze/elfload.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/linux-user/microblaze/elfload.c b/linux-user/microblaze/elfload.c
index 7eb1b26d170..bdc0a953d59 100644
--- a/linux-user/microblaze/elfload.c
+++ b/linux-user/microblaze/elfload.c
@@ -8,7 +8,8 @@
const char *get_elf_cpu_model(uint32_t eflags)
{
- return "any";
+ return TARGET_BIG_ENDIAN ? "any,little-endian=off"
+ : "any,little-endian=on";
}
void elf_core_copy_regs(target_elf_gregset_t *r, const CPUMBState *env)
--
2.51.0
* [PULL 75/75] mailmap: Unify Clément Mathieu--Drif emails
2025-10-16 12:14 [PULL 00/75] Misc single binary patches for 2025-10-16 Philippe Mathieu-Daudé
` (73 preceding siblings ...)
2025-10-16 12:15 ` [PULL 74/75] linux-user/microblaze: Fix little-endianness binary Philippe Mathieu-Daudé
@ 2025-10-16 12:15 ` Philippe Mathieu-Daudé
74 siblings, 0 replies; 76+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-10-16 12:15 UTC (permalink / raw)
To: qemu-devel
Do not let git-shortlog make a distinction between:
. Clément Mathieu--Drif
. Clement Mathieu--Drif
. CLEMENT MATHIEU--DRIF
as this is the same person.
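For context, the entry added below uses the simple one-field .mailmap
form:

    Proper Name <commit@email>

which tells git to report any commit authored with that email under
the given canonical name (this assumes the variant spellings above
were all used with the eviden.com address; see the gitmailmap
documentation for the two-field form that also rewrites the email).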
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Clément Mathieu--Drif <clement.mathieu--drif@eviden.com>
Message-Id: <20251009070512.8736-3-philmd@linaro.org>
---
.mailmap | 1 +
1 file changed, 1 insertion(+)
diff --git a/.mailmap b/.mailmap
index e7271852dc6..15bec72470e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -136,6 +136,7 @@ Chen Gang <gang.chen.5i5j@gmail.com>
Chen Gang <gang.chen@sunrus.com.cn>
Chen Wei-Ren <chenwj@iis.sinica.edu.tw>
Christophe Lyon <christophe.lyon@st.com>
+Clément Mathieu--Drif <clement.mathieu--drif@eviden.com>
Collin L. Walling <walling@linux.ibm.com>
Daniel P. Berrangé <berrange@redhat.com>
Eduardo Otubo <otubo@redhat.com>
--
2.51.0