From: Alistair Francis <alistair23@gmail.com>
To: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Cc: qemu-devel@nongnu.org, qemu-riscv@nongnu.org, palmer@dabbelt.com,
alistair.francis@wdc.com, dbarboza@ventanamicro.com,
liwei1518@gmail.com, bmeng.cn@gmail.com
Subject: Re: [PATCH v2 05/11] target/riscv: Support Zama16b extension
Date: Wed, 3 Jul 2024 10:12:15 +1000
Message-ID: <CAKmqyKMcf4ZWApkUSV-VbpSKpXeBE_uTNvT1QG-w00LbSuj8hQ@mail.gmail.com>
In-Reply-To: <20240630030559.877-6-zhiwei_liu@linux.alibaba.com>
On Sun, Jun 30, 2024 at 1:11 PM LIU Zhiwei <zhiwei_liu@linux.alibaba.com> wrote:
>
> Zama16b is the property that misaligned loads, stores and atomics within
> a naturally aligned 16-byte region are atomic.
>
> According to the specification, Zama16b applies only to AMOs, loads
> and stores defined in the base ISAs, and loads and stores of no more
> than XLEN bits defined in the F, D, and Q extensions. Thus it should
> not apply to zacas or RVC instructions.
>
> For an instruction in that set, if all accessed bytes lie within the same
> naturally aligned 16-byte granule, the instruction will not raise an
> exception for reasons of address alignment, and it will give rise to only
> one memory operation for the purposes of RVWMO, i.e., it will execute
> atomically.
>
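A quick illustration for anyone reading along: the guarantee only covers
accesses whose bytes all fall inside one naturally aligned 16-byte granule.
A minimal, self-contained C sketch of that containment check (the addresses
are made-up examples, not taken from the spec):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if all `size` accessed bytes fall inside one naturally
     * aligned 16-byte granule, i.e. the case Zama16b covers. */
    static bool within_16b_granule(uint64_t addr, unsigned size)
    {
        return (addr & 15) + size <= 16;
    }

    int main(void)
    {
        /* 8-byte access at 0x1004 stays inside 0x1000..0x100f: covered. */
        printf("%d\n", within_16b_granule(0x1004, 8));  /* prints 1 */
        /* 8-byte access at 0x100c crosses a granule boundary: not covered. */
        printf("%d\n", within_16b_granule(0x100c, 8));  /* prints 0 */
        return 0;
    }
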
> Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/cpu.c | 2 ++
> target/riscv/cpu_cfg.h | 1 +
> target/riscv/insn_trans/trans_rva.c.inc | 42 ++++++++++++++-----------
> target/riscv/insn_trans/trans_rvd.c.inc | 14 +++++++--
> target/riscv/insn_trans/trans_rvf.c.inc | 14 +++++++--
> target/riscv/insn_trans/trans_rvi.c.inc | 6 ++++
> 6 files changed, 57 insertions(+), 22 deletions(-)
>
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index 1d1402775a..5219b44176 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -118,6 +118,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
> ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
> ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
> ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
> + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
> ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
> ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
> ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
> @@ -1476,6 +1477,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
> MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
> MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
> MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
> + MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
> MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
> MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
> MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
> diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
> index d85e54b475..ddbfae37e5 100644
> --- a/target/riscv/cpu_cfg.h
> +++ b/target/riscv/cpu_cfg.h
> @@ -83,6 +83,7 @@ struct RISCVCPUConfig {
> bool ext_zdinx;
> bool ext_zaamo;
> bool ext_zacas;
> + bool ext_zama16b;
> bool ext_zalrsc;
> bool ext_zawrs;
> bool ext_zfa;
> diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
> index 4a9e4591d1..eb080baddd 100644
> --- a/target/riscv/insn_trans/trans_rva.c.inc
> +++ b/target/riscv/insn_trans/trans_rva.c.inc
> @@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
> TCGv dest = dest_gpr(ctx, a->rd);
> TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
>
> + if (ctx->cfg_ptr->ext_zama16b) {
> + mop |= MO_ATOM_WITHIN16;
> + } else {
> + mop |= MO_ALIGN;
> + }
> +
> decode_save_opc(ctx);
> src1 = get_address(ctx, a->rs1, 0);
> func(dest, src1, src2, ctx->mem_idx, mop);
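The trade here reads cleanly: with Zama16b the alignment trap is dropped and
replaced by a within-16-bytes single-copy-atomicity requirement, which is
exactly what MO_ATOM_WITHIN16 asks of the TCG backend. A hedged stand-alone
model of the selection (the MO_* values below are illustrative stand-ins,
not QEMU's real MemOp bits):

    #include <stdbool.h>

    enum { MO_ALIGN = 1 << 8, MO_ATOM_WITHIN16 = 1 << 9 };  /* stand-ins */

    /* Mirrors the if/else in gen_amo(): with Zama16b a misaligned AMO that
     * stays inside one 16-byte granule must complete atomically instead of
     * trapping; without it, natural alignment is still enforced. */
    static int amo_memop(int base_mop, bool ext_zama16b)
    {
        return base_mop | (ext_zama16b ? MO_ATOM_WITHIN16 : MO_ALIGN);
    }
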
> @@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
> static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL);
> }
>
> static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL);
> }
>
> static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL);
> }
>
> static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL);
> }
>
> static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL);
> }
>
> static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL);
> }
>
> static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL);
> }
>
> static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL);
> }
>
> static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
> {
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL);
> }
>
> static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
> @@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ);
> }
>
> static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ);
> }
>
> static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ);
> }
>
> static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ);
> }
>
> static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ);
> }
>
> static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ);
> }
>
> static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ);
> }
>
> static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ);
> }
>
> static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
> {
> REQUIRE_64BIT(ctx);
> REQUIRE_A_OR_ZAAMO(ctx);
> - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
> + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ);
> }
> diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
> index d9ce9e407f..1f5fac65a2 100644
> --- a/target/riscv/insn_trans/trans_rvd.c.inc
> +++ b/target/riscv/insn_trans/trans_rvd.c.inc
> @@ -42,13 +42,18 @@
> static bool trans_fld(DisasContext *ctx, arg_fld *a)
> {
> TCGv addr;
> + MemOp memop = MO_TEUQ;
>
> REQUIRE_FPU;
> REQUIRE_EXT(ctx, RVD);
>
> + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
> + memop |= MO_ATOM_WITHIN16;
> + }
> +
> decode_save_opc(ctx);
> addr = get_address(ctx, a->rs1, a->imm);
> - tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
> + tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop);
>
> mark_fs_dirty(ctx);
> return true;
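One detail worth flagging for other reviewers: the ctx->cur_insn_len != 2
test is what keeps the compressed encodings (c.fld and friends) out of the
guarantee, matching the commit message's point that Zama16b does not apply
to RVC instructions. As a rough sketch of the predicate (insn_len here is
an assumed stand-in for ctx->cur_insn_len):

    #include <stdbool.h>

    /* Zama16b applies only to the standard 4-byte encodings; 2-byte
     * (compressed) loads and stores are excluded by the spec. */
    static bool zama16b_applies(bool ext_zama16b, int insn_len)
    {
        return ext_zama16b && insn_len != 2;
    }

The same pattern repeats in the fsd/flw/fsw and integer load/store hunks
below, so this applies there too.
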
> @@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
> static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
> {
> TCGv addr;
> + MemOp memop = MO_TEUQ;
>
> REQUIRE_FPU;
> REQUIRE_EXT(ctx, RVD);
>
> + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
> + memop |= MO_ATOM_WITHIN16;
> + }
> +
> decode_save_opc(ctx);
> addr = get_address(ctx, a->rs1, a->imm);
> - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ);
> + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
> return true;
> }
>
> diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
> index 97a368970b..f771aa1939 100644
> --- a/target/riscv/insn_trans/trans_rvf.c.inc
> +++ b/target/riscv/insn_trans/trans_rvf.c.inc
> @@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
> {
> TCGv_i64 dest;
> TCGv addr;
> + MemOp memop = MO_TEUL;
>
> REQUIRE_FPU;
> REQUIRE_EXT(ctx, RVF);
>
> + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
> + memop |= MO_ATOM_WITHIN16;
> + }
> +
> decode_save_opc(ctx);
> addr = get_address(ctx, a->rs1, a->imm);
> dest = cpu_fpr[a->rd];
> - tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
> + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
> gen_nanbox_s(dest, dest);
>
> mark_fs_dirty(ctx);
> @@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
> static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
> {
> TCGv addr;
> + MemOp memop = MO_TEUL;
>
> REQUIRE_FPU;
> REQUIRE_EXT(ctx, RVF);
>
> + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
> + memop |= MO_ATOM_WITHIN16;
> + }
> +
> decode_save_opc(ctx);
> addr = get_address(ctx, a->rs1, a->imm);
> - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
> + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
> return true;
> }
>
> diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
> index ad40d3e87f..98e3806d5e 100644
> --- a/target/riscv/insn_trans/trans_rvi.c.inc
> +++ b/target/riscv/insn_trans/trans_rvi.c.inc
> @@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
> {
> bool out;
>
> + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
> + memop |= MO_ATOM_WITHIN16;
> + }
> decode_save_opc(ctx);
> if (get_xl(ctx) == MXL_RV128) {
> out = gen_load_i128(ctx, a, memop);
> @@ -366,6 +369,9 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
>
> static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
> {
> + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
> + memop |= MO_ATOM_WITHIN16;
> + }
> decode_save_opc(ctx);
> if (get_xl(ctx) == MXL_RV128) {
> return gen_store_i128(ctx, a, memop);
> --
> 2.25.1
>
>