From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: philmd@linaro.org, jiaxun.yang@flygoat.com, crwulff@gmail.com,
marex@denx.de, ysato@users.sourceforge.jp,
mark.cave-ayland@ilande.co.uk
Subject: [PATCH 11/16] target/sh4: Use MO_ALIGN where required
Date: Tue, 2 May 2023 17:08:41 +0100 [thread overview]
Message-ID: <20230502160846.1289975-12-richard.henderson@linaro.org> (raw)
In-Reply-To: <20230502160846.1289975-1-richard.henderson@linaro.org>
Mark all memory operations that are not already marked with UNALIGN() as alignment-checked (MO_ALIGN), matching the SH-4 architectural requirement that these accesses be naturally aligned.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sh4/translate.c | 102 ++++++++++++++++++++++++++---------------
1 file changed, 66 insertions(+), 36 deletions(-)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 6e40d5dd6a..0dedbb8210 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -527,13 +527,15 @@ static void _decode_opc(DisasContext * ctx)
case 0x9000: /* mov.w @(disp,PC),Rn */
{
TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESW | MO_ALIGN);
}
return;
case 0xd000: /* mov.l @(disp,PC),Rn */
{
TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESL | MO_ALIGN);
}
return;
case 0x7000: /* add #imm,Rn */
@@ -801,9 +803,11 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv arg0, arg1;
arg0 = tcg_temp_new();
- tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
+ MO_TESL | MO_ALIGN);
arg1 = tcg_temp_new();
- tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
+ MO_TESL | MO_ALIGN);
gen_helper_macl(cpu_env, arg0, arg1);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
@@ -813,9 +817,11 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv arg0, arg1;
arg0 = tcg_temp_new();
- tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
+ MO_TESL | MO_ALIGN);
arg1 = tcg_temp_new();
- tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
+ MO_TESL | MO_ALIGN);
gen_helper_macw(cpu_env, arg0, arg1);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
@@ -961,30 +967,36 @@ static void _decode_opc(DisasContext * ctx)
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
- tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ);
+ tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
+ MO_TEUQ | MO_ALIGN);
} else {
- tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
+ MO_TEUL | MO_ALIGN);
}
return;
case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
+ MO_TEUQ | MO_ALIGN);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
} else {
- tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TEUL | MO_ALIGN);
}
return;
case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
+ MO_TEUQ | MO_ALIGN);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
} else {
- tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TEUL | MO_ALIGN);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
}
return;
@@ -996,10 +1008,12 @@ static void _decode_opc(DisasContext * ctx)
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
tcg_gen_subi_i32(addr, REG(B11_8), 8);
- tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
+ MO_TEUQ | MO_ALIGN);
} else {
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
+ MO_TEUL | MO_ALIGN);
}
tcg_gen_mov_i32(REG(B11_8), addr);
}
@@ -1011,10 +1025,12 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ);
+ tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
+ MO_TEUQ | MO_ALIGN);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
} else {
- tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
+ MO_TEUL | MO_ALIGN);
}
}
return;
@@ -1026,9 +1042,11 @@ static void _decode_opc(DisasContext * ctx)
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
- tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
+ MO_TEUQ | MO_ALIGN);
} else {
- tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
+ MO_TEUL | MO_ALIGN);
}
}
return;
@@ -1158,14 +1176,14 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
- tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
}
return;
case 0xc600: /* mov.l @(disp,GBR),R0 */
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
- tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
}
return;
case 0xc000: /* mov.b R0,@(disp,GBR) */
@@ -1179,14 +1197,14 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
- tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
}
return;
case 0xc200: /* mov.l R0,@(disp,GBR) */
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
- tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
}
return;
case 0x8000: /* mov.b R0,@(disp,Rn) */
@@ -1286,7 +1304,8 @@ static void _decode_opc(DisasContext * ctx)
return;
case 0x4087: /* ldc.l @Rm+,Rn_BANK */
CHECK_PRIVILEGED
- tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
+ MO_TESL | MO_ALIGN);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
return;
case 0x0082: /* stc Rm_BANK,Rn */
@@ -1298,7 +1317,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
+ MO_TEUL | MO_ALIGN);
tcg_gen_mov_i32(REG(B11_8), addr);
}
return;
@@ -1354,7 +1374,8 @@ static void _decode_opc(DisasContext * ctx)
CHECK_PRIVILEGED
{
TCGv val = tcg_temp_new();
- tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
+ MO_TESL | MO_ALIGN);
tcg_gen_andi_i32(val, val, 0x700083f3);
gen_write_sr(val);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
@@ -1372,7 +1393,7 @@ static void _decode_opc(DisasContext * ctx)
TCGv val = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
gen_read_sr(val);
- tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
tcg_gen_mov_i32(REG(B11_8), addr);
}
return;
@@ -1383,7 +1404,8 @@ static void _decode_opc(DisasContext * ctx)
return; \
case ldpnum: \
prechk \
- tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
+ tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \
+ MO_TESL | MO_ALIGN); \
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
return;
#define ST(reg,stnum,stpnum,prechk) \
@@ -1396,7 +1418,8 @@ static void _decode_opc(DisasContext * ctx)
{ \
TCGv addr = tcg_temp_new(); \
tcg_gen_subi_i32(addr, REG(B11_8), 4); \
- tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
+ tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \
+ MO_TEUL | MO_ALIGN); \
tcg_gen_mov_i32(REG(B11_8), addr); \
} \
return;
@@ -1423,7 +1446,8 @@ static void _decode_opc(DisasContext * ctx)
CHECK_FPU_ENABLED
{
TCGv addr = tcg_temp_new();
- tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
+ MO_TESL | MO_ALIGN);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
gen_helper_ld_fpscr(cpu_env, addr);
ctx->base.is_jmp = DISAS_STOP;
@@ -1441,16 +1465,18 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
tcg_gen_mov_i32(REG(B11_8), addr);
}
return;
case 0x00c3: /* movca.l R0,@Rm */
{
TCGv val = tcg_temp_new();
- tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
+ MO_TEUL | MO_ALIGN);
gen_helper_movcal(cpu_env, REG(B11_8), val);
- tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
+ MO_TEUL | MO_ALIGN);
}
ctx->has_movcal = 1;
return;
@@ -1492,11 +1518,13 @@ static void _decode_opc(DisasContext * ctx)
cpu_lock_addr, fail);
tmp = tcg_temp_new();
tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
- REG(0), ctx->memidx, MO_TEUL);
+ REG(0), ctx->memidx,
+ MO_TEUL | MO_ALIGN);
tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
} else {
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
- tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
+ MO_TEUL | MO_ALIGN);
tcg_gen_movi_i32(cpu_sr_t, 1);
}
tcg_gen_br(done);
@@ -1521,11 +1549,13 @@ static void _decode_opc(DisasContext * ctx)
if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
TCGv tmp = tcg_temp_new();
tcg_gen_mov_i32(tmp, REG(B11_8));
- tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+ MO_TESL | MO_ALIGN);
tcg_gen_mov_i32(cpu_lock_value, REG(0));
tcg_gen_mov_i32(cpu_lock_addr, tmp);
} else {
- tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+ MO_TESL | MO_ALIGN);
tcg_gen_movi_i32(cpu_lock_addr, 0);
}
return;
--
2.34.1
next prev parent reply other threads:[~2023-05-02 16:09 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-02 16:08 [PATCH 00/16] tcg: Remove TARGET_ALIGNED_ONLY Richard Henderson
2023-05-02 16:08 ` [PATCH 01/16] target/alpha: Use MO_ALIGN for system UNALIGN() Richard Henderson
2023-05-02 16:08 ` [PATCH 02/16] target/alpha: Use MO_ALIGN where required Richard Henderson
2023-05-02 16:08 ` [PATCH 03/16] target/alpha: Remove TARGET_ALIGNED_ONLY Richard Henderson
2023-05-02 16:08 ` [PATCH 04/16] target/hppa: Use MO_ALIGN for system UNALIGN() Richard Henderson
2023-05-02 16:08 ` [PATCH 05/16] target/hppa: Remove TARGET_ALIGNED_ONLY Richard Henderson
2023-05-02 16:08 ` [PATCH 06/16] target/mips: Add MO_ALIGN to gen_llwp, gen_scwp Richard Henderson
2023-05-02 16:08 ` [PATCH 07/16] target/mips: Add missing default_tcg_memop_mask Richard Henderson
2023-05-02 16:08 ` [PATCH 08/16] target/mips: Use MO_ALIGN instead of 0 Richard Henderson
2023-05-10 13:37 ` Philippe Mathieu-Daudé
2023-05-02 16:08 ` [PATCH 09/16] target/mips: Remove TARGET_ALIGNED_ONLY Richard Henderson
2023-05-02 16:08 ` [PATCH 10/16] target/nios2: " Richard Henderson
2023-05-10 14:45 ` Philippe Mathieu-Daudé
2023-05-02 16:08 ` Richard Henderson [this message]
2023-05-10 13:41 ` [PATCH 11/16] target/sh4: Use MO_ALIGN where required Philippe Mathieu-Daudé
2023-05-02 16:08 ` [PATCH 12/16] target/sh4: Remove TARGET_ALIGNED_ONLY Richard Henderson
2023-05-10 14:44 ` Philippe Mathieu-Daudé
2023-05-02 16:08 ` [PATCH 13/16] target/sparc: Use MO_ALIGN where required Richard Henderson
2023-05-03 20:26 ` Mark Cave-Ayland
2023-05-02 16:08 ` [PATCH 14/16] target/sparc: Use cpu_ld*_code_mmu Richard Henderson
2023-05-03 20:27 ` Mark Cave-Ayland
2023-05-02 16:08 ` [PATCH 15/16] target/sparc: Remove TARGET_ALIGNED_ONLY Richard Henderson
2023-05-03 20:28 ` Mark Cave-Ayland
2023-05-02 16:08 ` [PATCH 16/16] tcg: " Richard Henderson
2023-05-10 14:43 ` Philippe Mathieu-Daudé
2023-05-10 15:11 ` Richard Henderson
2023-05-10 11:16 ` [PATCH 00/16] " Richard Henderson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230502160846.1289975-12-richard.henderson@linaro.org \
--to=richard.henderson@linaro.org \
--cc=crwulff@gmail.com \
--cc=jiaxun.yang@flygoat.com \
--cc=marex@denx.de \
--cc=mark.cave-ayland@ilande.co.uk \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=ysato@users.sourceforge.jp \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).