From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: philmd@linaro.org, jiaxun.yang@flygoat.com, crwulff@gmail.com,
marex@denx.de, ysato@users.sourceforge.jp,
mark.cave-ayland@ilande.co.uk
Subject: [PATCH 02/16] target/alpha: Use MO_ALIGN where required
Date: Tue, 2 May 2023 17:08:32 +0100
Message-ID: <20230502160846.1289975-3-richard.henderson@linaro.org>
In-Reply-To: <20230502160846.1289975-1-richard.henderson@linaro.org>

Mark all memory operations that are not already marked with UNALIGN.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
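Note (not part of the commit message): once TARGET_ALIGNED_ONLY is
removed later in this series, Alpha no longer gets implicit alignment
checks, so every access that must trap on misalignment needs an
explicit MO_ALIGN. A minimal sketch of the difference, assuming the
usual MemOp semantics from include/exec/memop.h:

    /* Default: the memory op itself does not enforce alignment;
       a misaligned addr is simply performed unaligned. */
    tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);

    /* With MO_ALIGN: a misaligned addr raises an alignment fault
       through the target's do_unaligned_access hook instead. */
    tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
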
 target/alpha/translate.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index ffbac1c114..be8adb2526 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -2399,21 +2399,21 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
@@ -2438,11 +2438,13 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2453,12 +2455,14 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
}
break;
@@ -2659,7 +2663,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access */
@@ -2667,17 +2671,17 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LESL);
+ MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LEUQ);
+ MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x4:
/* Longword virtual access */
@@ -2771,11 +2775,11 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2A:
/* LDL_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
break;
case 0x2B:
/* LDQ_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
break;
case 0x2C:
/* STL */
@@ -2788,12 +2792,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LESL);
+ ctx->mem_idx, MO_LESL | MO_ALIGN);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEUQ);
+ ctx->mem_idx, MO_LEUQ | MO_ALIGN);
break;
case 0x30:
/* BR */
--
2.34.1