From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH v4 161/163] tcg: Convert qemu_ld{2} to TCGOutOpLoad{2}
Date: Wed, 16 Apr 2025 13:57:55 -0700 [thread overview]
Message-ID: <acf29586-dde7-42c4-9a7b-60b75bdadaca@linaro.org> (raw)
In-Reply-To: <20250415192515.232910-162-richard.henderson@linaro.org>
On 4/15/25 12:25, Richard Henderson wrote:
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> tcg/tcg.c | 32 +++++++++++++++-
> tcg/aarch64/tcg-target.c.inc | 30 +++++++++------
> tcg/arm/tcg-target.c.inc | 63 +++++++++++++++++++++++---------
> tcg/i386/tcg-target.c.inc | 47 ++++++++++++++++--------
> tcg/loongarch64/tcg-target.c.inc | 37 ++++++++++---------
> tcg/mips/tcg-target.c.inc | 57 +++++++++++++++++++++--------
> tcg/ppc/tcg-target.c.inc | 45 ++++++++++++++---------
> tcg/riscv/tcg-target.c.inc | 22 ++++++-----
> tcg/s390x/tcg-target.c.inc | 32 +++++++++-------
> tcg/sparc64/tcg-target.c.inc | 21 ++++++-----
> tcg/tci/tcg-target.c.inc | 30 ++++++++++++---
> 11 files changed, 283 insertions(+), 133 deletions(-)
>
> diff --git a/tcg/tcg.c b/tcg/tcg.c
> index 062f176fa5..ac955223a5 100644
> --- a/tcg/tcg.c
> +++ b/tcg/tcg.c
> @@ -1071,6 +1071,18 @@ typedef struct TCGOutOpMul2 {
> TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3);
> } TCGOutOpMul2;
>
> +typedef struct TCGOutOpQemuLdSt {
> + TCGOutOp base;
> + void (*out)(TCGContext *s, TCGType type, TCGReg dest,
> + TCGReg addr, MemOpIdx oi);
> +} TCGOutOpQemuLdSt;
> +
> +typedef struct TCGOutOpQemuLdSt2 {
> + TCGOutOp base;
> + void (*out)(TCGContext *s, TCGType type, TCGReg dlo, TCGReg dhi,
> + TCGReg addr, MemOpIdx oi);
> +} TCGOutOpQemuLdSt2;
> +
> typedef struct TCGOutOpUnary {
> TCGOutOp base;
> void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
> @@ -1210,6 +1222,8 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
> OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
> OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
> OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
> + OUTOP(INDEX_op_qemu_ld, TCGOutOpQemuLdSt, outop_qemu_ld),
> + OUTOP(INDEX_op_qemu_ld2, TCGOutOpQemuLdSt2, outop_qemu_ld2),
> OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
> OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
> OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl),
> @@ -2446,7 +2460,7 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
> return true;
> }
> tcg_debug_assert(type == TCG_TYPE_I128);
> - return TCG_TARGET_HAS_qemu_ldst_i128;
> + goto do_lookup;
>
> case INDEX_op_add:
> case INDEX_op_and:
> @@ -2558,6 +2572,7 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
> return false;
> }
>
> + do_lookup:
> outop = all_outop[op];
> tcg_debug_assert(outop != NULL);
>
> @@ -5797,6 +5812,21 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
> }
> break;
>
> + case INDEX_op_qemu_ld:
> + {
> + const TCGOutOpQemuLdSt *out = &outop_qemu_ld;
> + out->out(s, type, new_args[0], new_args[1], new_args[2]);
> + }
> + break;
> +
> + case INDEX_op_qemu_ld2:
> + {
> + const TCGOutOpQemuLdSt2 *out = &outop_qemu_ld2;
> + out->out(s, type, new_args[0], new_args[1],
> + new_args[2], new_args[3]);
> + }
> + break;
> +
> case INDEX_op_brcond:
> {
> const TCGOutOpBrcond *out = &outop_brcond;
> diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
> index f4a0b0e720..21b6ce1229 100644
> --- a/tcg/aarch64/tcg-target.c.inc
> +++ b/tcg/aarch64/tcg-target.c.inc
> @@ -1806,8 +1806,8 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
> }
> }
>
> -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
> - MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext *s, TCGType data_type, TCGReg data_reg,
> + TCGReg addr_reg, MemOpIdx oi)
> {
> TCGLabelQemuLdst *ldst;
> HostAddress h;
> @@ -1822,6 +1822,11 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
> }
> }
>
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
> MemOpIdx oi, TCGType data_type)
> {
> @@ -1940,6 +1945,17 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
> }
> }
>
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
> +{
> + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
> +}
> +
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint = C_O2_I1(r, r, r),
> + .out = tgen_qemu_ld2,
> +};
> +
> static const tcg_insn_unit *tb_ret_addr;
>
> static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
> @@ -2875,15 +2891,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
> TCGArg a2 = args[2];
>
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, a0, a1, a2, ext);
> - break;
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, a0, a1, a2, ext);
> break;
> - case INDEX_op_qemu_ld2:
> - tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
> - break;
> case INDEX_op_qemu_st2:
> tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
> break;
> @@ -3340,10 +3350,6 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> - case INDEX_op_qemu_ld2:
> - return C_O2_I1(r, r, r);
> case INDEX_op_qemu_st:
> return C_O0_I2(rz, r);
> case INDEX_op_qemu_st2:
> diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
> index 29fd82e9e0..681ecc3d7a 100644
> --- a/tcg/arm/tcg-target.c.inc
> +++ b/tcg/arm/tcg-target.c.inc
> @@ -1586,8 +1586,8 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
> }
> }
>
> -static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
> - TCGReg addr, MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
> + TCGReg addr, MemOpIdx oi)
> {
> MemOp opc = get_memop(oi);
> TCGLabelQemuLdst *ldst;
> @@ -1595,7 +1595,41 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
>
> ldst = prepare_host_addr(s, &h, addr, oi, true);
> if (ldst) {
> - ldst->type = data_type;
> + ldst->type = type;
> + ldst->datalo_reg = data;
> + ldst->datahi_reg = -1;
> +
> + /*
> + * This is a conditional BL only to load a pointer within this
> + * opcode into LR for the slow path. We will not be using
> + * the value for a tail call.
> + */
> + ldst->label_ptr[0] = s->code_ptr;
> + tcg_out_bl_imm(s, COND_NE, 0);
> + }
> +
> + tcg_out_qemu_ld_direct(s, opc, data, -1, h);
> +
> + if (ldst) {
> + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> + }
> +}
> +
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, q),
> + .out = tgen_qemu_ld,
> +};
> +
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr, MemOpIdx oi)
> +{
> + MemOp opc = get_memop(oi);
> + TCGLabelQemuLdst *ldst;
> + HostAddress h;
> +
> + ldst = prepare_host_addr(s, &h, addr, oi, true);
> + if (ldst) {
> + ldst->type = type;
> ldst->datalo_reg = datalo;
> ldst->datahi_reg = datahi;
>
> @@ -1606,14 +1640,20 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
> */
> ldst->label_ptr[0] = s->code_ptr;
> tcg_out_bl_imm(s, COND_NE, 0);
> + }
>
> - tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
> + tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
> +
> + if (ldst) {
> ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> - } else {
> - tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
> }
> }
>
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint = C_O2_I1(e, p, q),
> + .out = tgen_qemu_ld2,
> +};
> +
> static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
> TCGReg datahi, HostAddress h)
> {
> @@ -2570,13 +2610,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> const int const_args[TCG_MAX_OP_ARGS])
> {
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
> - break;
> - case INDEX_op_qemu_ld2:
> - tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
> - break;
> -
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
> break;
> @@ -2596,10 +2629,6 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, q);
> - case INDEX_op_qemu_ld2:
> - return C_O2_I1(e, p, q);
> case INDEX_op_qemu_st:
> return C_O0_I2(q, q);
> case INDEX_op_qemu_st2:
> diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
> index cb66f6c27f..7ec06f57ee 100644
> --- a/tcg/i386/tcg-target.c.inc
> +++ b/tcg/i386/tcg-target.c.inc
> @@ -2422,23 +2422,50 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
> }
> }
>
> -static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
> - TCGReg addr, MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
> + TCGReg addr, MemOpIdx oi)
> {
> TCGLabelQemuLdst *ldst;
> HostAddress h;
>
> ldst = prepare_host_addr(s, &h, addr, oi, true);
> - tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, get_memop(oi));
> + tcg_out_qemu_ld_direct(s, data, -1, h, type, get_memop(oi));
>
> if (ldst) {
> - ldst->type = data_type;
> + ldst->type = type;
> + ldst->datalo_reg = data;
> + ldst->datahi_reg = -1;
> + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> + }
> +}
> +
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, L),
> + .out = tgen_qemu_ld,
> +};
> +
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr, MemOpIdx oi)
> +{
> + TCGLabelQemuLdst *ldst;
> + HostAddress h;
> +
> + ldst = prepare_host_addr(s, &h, addr, oi, true);
> + tcg_out_qemu_ld_direct(s, datalo, datahi, h, type, get_memop(oi));
> +
> + if (ldst) {
> + ldst->type = type;
> ldst->datalo_reg = datalo;
> ldst->datahi_reg = datahi;
> ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> }
> }
>
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint = C_O2_I1(r, r, L),
> + .out = tgen_qemu_ld2,
> +};
> +
> static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
> HostAddress h, MemOp memop)
> {
> @@ -3552,13 +3579,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> a2 = args[2];
>
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, a0, -1, a1, a2, type);
> - break;
> - case INDEX_op_qemu_ld2:
> - tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
> - break;
> -
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, a0, -1, a1, a2, type);
> break;
> @@ -4119,16 +4139,11 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, L);
> -
> case INDEX_op_qemu_st:
> return (TCG_TARGET_REG_BITS == 32 && flags == MO_8
> ? C_O0_I2(s, L)
> : C_O0_I2(L, L));
>
> - case INDEX_op_qemu_ld2:
> - return C_O2_I1(r, r, L);
> case INDEX_op_qemu_st2:
> return C_O0_I3(L, L, L);
>
> diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
> index d4f65692dd..a1147a1cdc 100644
> --- a/tcg/loongarch64/tcg-target.c.inc
> +++ b/tcg/loongarch64/tcg-target.c.inc
> @@ -1155,22 +1155,27 @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
> }
> }
>
> -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
> - MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
> + TCGReg addr_reg, MemOpIdx oi)
> {
> TCGLabelQemuLdst *ldst;
> HostAddress h;
>
> ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
> - tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);
> + tcg_out_qemu_ld_indexed(s, get_memop(oi), type, data_reg, h);
>
> if (ldst) {
> - ldst->type = data_type;
> + ldst->type = type;
> ldst->datalo_reg = data_reg;
> ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> }
> }
>
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
> TCGReg rd, HostAddress h)
> {
> @@ -1258,6 +1263,17 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi
> }
> }
>
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
> +{
> + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
> +}
> +
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint = C_N2_I1(r, r, r),
> + .out = tgen_qemu_ld2,
> +};
> +
> /*
> * Entry-points
> */
> @@ -2008,12 +2024,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> TCGArg a3 = args[3];
>
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, a0, a1, a2, type);
> - break;
> - case INDEX_op_qemu_ld2:
> - tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
> - break;
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, a0, a1, a2, type);
> break;
> @@ -2531,16 +2541,9 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> switch (op) {
> case INDEX_op_qemu_st:
> return C_O0_I2(rz, r);
> -
> - case INDEX_op_qemu_ld2:
> - return C_N2_I1(r, r, r);
> -
> case INDEX_op_qemu_st2:
> return C_O0_I3(r, r, r);
>
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> -
> case INDEX_op_ld_vec:
> case INDEX_op_dupm_vec:
> case INDEX_op_dup_vec:
> diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
> index eaaf0f2024..14bffcd404 100644
> --- a/tcg/mips/tcg-target.c.inc
> +++ b/tcg/mips/tcg-target.c.inc
> @@ -1387,8 +1387,8 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
> }
> }
>
> -static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
> - TCGReg addr, MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
> + TCGReg addr, MemOpIdx oi)
> {
> MemOp opc = get_memop(oi);
> TCGLabelQemuLdst *ldst;
> @@ -1397,19 +1397,56 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
> ldst = prepare_host_addr(s, &h, addr, oi, true);
>
> if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
> - tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type);
> + tcg_out_qemu_ld_direct(s, data, 0, h.base, opc, type);
> } else {
> - tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, data_type);
> + tcg_out_qemu_ld_unalign(s, data, 0, h.base, opc, type);
> }
>
> if (ldst) {
> - ldst->type = data_type;
> + ldst->type = type;
> + ldst->datalo_reg = data;
> + ldst->datahi_reg = 0;
> + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> + }
> +}
> +
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr, MemOpIdx oi)
> +{
> + MemOp opc = get_memop(oi);
> + TCGLabelQemuLdst *ldst;
> + HostAddress h;
> +
> + tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
> + ldst = prepare_host_addr(s, &h, addr, oi, true);
> +
> + if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
> + tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, type);
> + } else {
> + tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, type);
> + }
> +
> + if (ldst) {
> + ldst->type = type;
> ldst->datalo_reg = datalo;
> ldst->datahi_reg = datahi;
> ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> }
> }
>
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + /* Ensure that the mips32 code is compiled but discarded for mips64. */
> + .base.static_constraint =
> + TCG_TARGET_REG_BITS == 32 ? C_O2_I1(r, r, r) : C_NotImplemented,
> + .out =
> + TCG_TARGET_REG_BITS == 32 ? tgen_qemu_ld2 : NULL,
> +};
> +
> static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
> TCGReg base, MemOp opc)
> {
> @@ -2381,14 +2418,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> a2 = args[2];
>
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, a0, 0, a1, a2, type);
> - break;
> - case INDEX_op_qemu_ld2:
> - tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
> - tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
> - break;
> -
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, a0, 0, a1, a2, type);
> break;
> @@ -2409,8 +2438,6 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> case INDEX_op_qemu_st:
> return C_O0_I2(rz, r);
> case INDEX_op_qemu_ld2:
> diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
> index bb26769d53..e4e6b7b2d9 100644
> --- a/tcg/ppc/tcg-target.c.inc
> +++ b/tcg/ppc/tcg-target.c.inc
> @@ -2695,6 +2695,33 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
> }
> }
>
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
> + TCGReg addr, MemOpIdx oi)
> +{
> + tcg_out_qemu_ld(s, data, -1, addr, oi, type);
> +}
> +
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr, MemOpIdx oi)
> +{
> + if (TCG_TARGET_REG_BITS == 32) {
> + tcg_out_qemu_ld(s, datalo, datahi, addr, oi, type);
> + } else {
> + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, true);
> + }
> +}
> +
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint =
> + TCG_TARGET_REG_BITS == 64 ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r),
> + .out = tgen_qemu_ld2,
> +};
> +
> static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
> {
> int i;
> @@ -3779,18 +3806,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> const int const_args[TCG_MAX_OP_ARGS])
> {
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], type);
> - break;
> - case INDEX_op_qemu_ld2:
> - if (TCG_TARGET_REG_BITS == 32) {
> - tcg_out_qemu_ld(s, args[0], args[1], args[2],
> - args[3], TCG_TYPE_I64);
> - break;
> - }
> - tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
> - break;
> -
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, args[0], -1, args[1], args[2], type);
> break;
> @@ -4418,12 +4433,6 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> - case INDEX_op_qemu_ld2:
> - return TCG_TARGET_REG_BITS == 64
> - ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r);
> -
> case INDEX_op_qemu_st:
> return C_O0_I2(r, r);
> case INDEX_op_qemu_st2:
> diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
> index 89c7736f9a..94e6f04fa6 100644
> --- a/tcg/riscv/tcg-target.c.inc
> +++ b/tcg/riscv/tcg-target.c.inc
> @@ -1833,22 +1833,31 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
> }
> }
>
> -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
> - MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
> + TCGReg addr_reg, MemOpIdx oi)
> {
> TCGLabelQemuLdst *ldst;
> TCGReg base;
>
> ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
> - tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);
> + tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), type);
>
> if (ldst) {
> - ldst->type = data_type;
> + ldst->type = type;
> ldst->datalo_reg = data_reg;
> ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> }
> }
>
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint = C_NotImplemented,
> +};
> +
> static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
> TCGReg base, MemOp opc)
> {
> @@ -2633,9 +2642,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> TCGArg a2 = args[2];
>
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, a0, a1, a2, type);
> - break;
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, a0, a1, a2, type);
> break;
> @@ -2869,8 +2875,6 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> case INDEX_op_qemu_st:
> return C_O0_I2(rz, r);
>
> diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
> index 652ce9023e..72dd161007 100644
> --- a/tcg/s390x/tcg-target.c.inc
> +++ b/tcg/s390x/tcg-target.c.inc
> @@ -2081,8 +2081,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
> return ldst;
> }
>
> -static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
> - MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext* s, TCGType type, TCGReg data_reg,
> + TCGReg addr_reg, MemOpIdx oi)
> {
> TCGLabelQemuLdst *ldst;
> HostAddress h;
> @@ -2091,12 +2091,17 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
> tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);
>
> if (ldst) {
> - ldst->type = data_type;
> + ldst->type = type;
> ldst->datalo_reg = data_reg;
> ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> }
> }
>
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
> MemOpIdx oi, TCGType data_type)
> {
> @@ -2187,6 +2192,17 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
> }
> }
>
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
> +{
> + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
> +}
> +
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint = C_O2_I1(o, m, r),
> + .out = tgen_qemu_ld2,
> +};
> +
> static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
> {
> /* Reuse the zeroing that exists for goto_ptr. */
> @@ -3133,15 +3149,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> const int const_args[TCG_MAX_OP_ARGS])
> {
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, args[0], args[1], args[2], type);
> - break;
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, args[0], args[1], args[2], type);
> break;
> - case INDEX_op_qemu_ld2:
> - tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
> - break;
> case INDEX_op_qemu_st2:
> tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
> break;
> @@ -3594,12 +3604,8 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> case INDEX_op_qemu_st:
> return C_O0_I2(r, r);
> - case INDEX_op_qemu_ld2:
> - return C_O2_I1(o, m, r);
> case INDEX_op_qemu_st2:
> return C_O0_I3(o, m, r);
>
> diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
> index 2f23d713b7..d9a4b4ea36 100644
> --- a/tcg/sparc64/tcg-target.c.inc
> +++ b/tcg/sparc64/tcg-target.c.inc
> @@ -1186,8 +1186,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
> return ldst;
> }
>
> -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
> - MemOpIdx oi, TCGType data_type)
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
> + TCGReg addr, MemOpIdx oi)
> {
> static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
> [MO_UB] = LDUB,
> @@ -1219,12 +1219,21 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
> ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);
>
> if (ldst) {
> - ldst->type = data_type;
> + ldst->type = type;
> ldst->datalo_reg = data;
> ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
> }
> }
>
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint = C_NotImplemented,
> +};
> +
> static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
> MemOpIdx oi, TCGType data_type)
> {
> @@ -2063,9 +2072,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> a2 = args[2];
>
> switch (opc) {
> - case INDEX_op_qemu_ld:
> - tcg_out_qemu_ld(s, a0, a1, a2, type);
> - break;
> case INDEX_op_qemu_st:
> tcg_out_qemu_st(s, a0, a1, a2, type);
> break;
> @@ -2082,9 +2088,6 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> -
> case INDEX_op_qemu_st:
> return C_O0_I2(rz, r);
>
> diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
> index 6b8f71f49e..f69e35e6ce 100644
> --- a/tcg/tci/tcg-target.c.inc
> +++ b/tcg/tci/tcg-target.c.inc
> @@ -40,12 +40,8 @@ static TCGConstraintSetIndex
> tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
> {
> switch (op) {
> - case INDEX_op_qemu_ld:
> - return C_O1_I1(r, r);
> case INDEX_op_qemu_st:
> return C_O0_I2(r, r);
> - case INDEX_op_qemu_ld2:
> - return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r);
> case INDEX_op_qemu_st2:
> return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r);
>
> @@ -1197,17 +1193,39 @@ static const TCGOutOpStore outop_st = {
> .out_r = tcg_out_st,
> };
>
> +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
> + TCGReg addr, MemOpIdx oi)
> +{
> + tcg_out_op_rrm(s, INDEX_op_qemu_ld, data, addr, oi);
> +}
> +
> +static const TCGOutOpQemuLdSt outop_qemu_ld = {
> + .base.static_constraint = C_O1_I1(r, r),
> + .out = tgen_qemu_ld,
> +};
> +
> +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
> + TCGReg datahi, TCGReg addr, MemOpIdx oi)
> +{
> + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi);
> + tcg_out_op_rrrr(s, INDEX_op_qemu_ld2, datalo, datahi, addr, TCG_REG_TMP);
> +}
> +
> +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
> + .base.static_constraint =
> + TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r),
> + .out =
> + TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_ld2,
> +};
>
> static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
> const TCGArg args[TCG_MAX_OP_ARGS],
> const int const_args[TCG_MAX_OP_ARGS])
> {
> switch (opc) {
> - case INDEX_op_qemu_ld:
> case INDEX_op_qemu_st:
> tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
> break;
> - case INDEX_op_qemu_ld2:
> case INDEX_op_qemu_st2:
> tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
> tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
next prev parent reply other threads:[~2025-04-16 20:58 UTC|newest]
Thread overview: 316+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-04-15 19:22 [PATCH v4 000/163] tcg: Convert to TCGOutOp structures Richard Henderson
2025-04-15 19:22 ` [PATCH v4 001/163] tcg: Add all_outop[] Richard Henderson
2025-04-15 19:22 ` [PATCH v4 002/163] tcg: Use extract2 for cross-word 64-bit extract on 32-bit host Richard Henderson
2025-04-15 19:22 ` [PATCH v4 003/163] tcg: Remove INDEX_op_ext{8,16,32}* Richard Henderson
2025-04-15 19:22 ` [PATCH v4 004/163] tcg: Merge INDEX_op_mov_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 005/163] tcg: Convert add to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 006/163] tcg: Merge INDEX_op_add_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 007/163] tcg: Convert and to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 008/163] tcg: Merge INDEX_op_and_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 009/163] tcg/optimize: Fold andc with immediate to and Richard Henderson
2025-04-15 19:22 ` [PATCH v4 010/163] tcg/optimize: Emit add r, r, -1 in fold_setcond_tst_pow2 Richard Henderson
2025-04-15 19:22 ` [PATCH v4 011/163] tcg: Convert andc to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 012/163] tcg: Merge INDEX_op_andc_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 013/163] tcg: Convert or to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 014/163] tcg: Merge INDEX_op_or_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 015/163] tcg/optimize: Fold orc with immediate to or Richard Henderson
2025-04-15 19:22 ` [PATCH v4 016/163] tcg: Convert orc to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 017/163] tcg: Merge INDEX_op_orc_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 018/163] tcg: Convert xor to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 019/163] tcg: Merge INDEX_op_xor_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 020/163] tcg/optimize: Fold eqv with immediate to xor Richard Henderson
2025-04-15 19:22 ` [PATCH v4 021/163] tcg: Convert eqv to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 022/163] tcg: Merge INDEX_op_eqv_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 023/163] tcg: Convert nand to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 024/163] tcg: Merge INDEX_op_nand_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 025/163] tcg/loongarch64: Do not accept constant argument to nor Richard Henderson
2025-04-15 19:22 ` [PATCH v4 026/163] tcg: Convert nor to TCGOutOpBinary Richard Henderson
2025-04-15 19:22 ` [PATCH v4 027/163] tcg: Merge INDEX_op_nor_{i32,i64} Richard Henderson
2025-04-15 19:22 ` [PATCH v4 028/163] tcg/arm: Fix constraints for sub Richard Henderson
2025-04-15 19:23 ` [PATCH v4 029/163] tcg: Convert sub to TCGOutOpSubtract Richard Henderson
2025-04-15 21:00 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 030/163] tcg: Merge INDEX_op_sub_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 031/163] tcg: Convert neg to TCGOutOpUnary Richard Henderson
2025-04-15 19:23 ` [PATCH v4 032/163] tcg: Merge INDEX_op_neg_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 033/163] tcg: Convert not to TCGOutOpUnary Richard Henderson
2025-04-15 19:23 ` [PATCH v4 034/163] tcg: Merge INDEX_op_not_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 035/163] tcg: Convert mul to TCGOutOpBinary Richard Henderson
2025-04-15 19:23 ` [PATCH v4 036/163] tcg: Merge INDEX_op_mul_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 037/163] tcg: Convert muluh to TCGOutOpBinary Richard Henderson
2025-04-15 19:23 ` [PATCH v4 038/163] tcg: Merge INDEX_op_muluh_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 039/163] tcg: Convert mulsh to TCGOutOpBinary Richard Henderson
2025-04-15 19:23 ` [PATCH v4 040/163] tcg: Merge INDEX_op_mulsh_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 041/163] tcg: Convert div to TCGOutOpBinary Richard Henderson
2025-04-15 21:02 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 042/163] tcg: Merge INDEX_op_div_{i32,i64} Richard Henderson
2025-04-15 21:04 ` Pierrick Bouvier
2025-04-22 15:27 ` Philippe Mathieu-Daudé
2025-04-15 19:23 ` [PATCH v4 043/163] tcg: Convert divu to TCGOutOpBinary Richard Henderson
2025-04-15 21:04 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 044/163] tcg: Merge INDEX_op_divu_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 045/163] tcg: Convert div2 to TCGOutOpDivRem Richard Henderson
2025-04-15 19:23 ` [PATCH v4 046/163] tcg: Merge INDEX_op_div2_{i32,i64} Richard Henderson
2025-04-15 21:05 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 047/163] tcg: Convert divu2 to TCGOutOpDivRem Richard Henderson
2025-04-15 21:05 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 048/163] tcg: Merge INDEX_op_divu2_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 049/163] tcg: Convert rem to TCGOutOpBinary Richard Henderson
2025-04-15 21:06 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 050/163] tcg: Merge INDEX_op_rem_{i32,i64} Richard Henderson
2025-04-15 21:06 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 051/163] tcg: Convert remu to TCGOutOpBinary Richard Henderson
2025-04-15 21:07 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 052/163] tcg: Merge INDEX_op_remu_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 053/163] tcg: Convert shl to TCGOutOpBinary Richard Henderson
2025-04-15 19:23 ` [PATCH v4 054/163] tcg: Merge INDEX_op_shl_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 055/163] tcg: Convert shr to TCGOutOpBinary Richard Henderson
2025-04-15 21:08 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 056/163] tcg: Merge INDEX_op_shr_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 057/163] tcg: Convert sar to TCGOutOpBinary Richard Henderson
2025-04-15 21:09 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 058/163] tcg: Merge INDEX_op_sar_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 059/163] tcg: Do not require both rotr and rotl from the backend Richard Henderson
2025-04-15 21:10 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 060/163] tcg: Convert rotl, rotr to TCGOutOpBinary Richard Henderson
2025-04-15 21:10 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 061/163] tcg: Merge INDEX_op_rot{l,r}_{i32,i64} Richard Henderson
2025-04-15 21:11 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 062/163] tcg: Convert clz to TCGOutOpBinary Richard Henderson
2025-04-15 21:12 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 063/163] tcg: Merge INDEX_op_clz_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 064/163] tcg: Convert ctz to TCGOutOpBinary Richard Henderson
2025-04-15 21:13 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 065/163] tcg: Merge INDEX_op_ctz_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 066/163] tcg: Convert ctpop to TCGOutOpUnary Richard Henderson
2025-04-15 21:14 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 067/163] tcg: Merge INDEX_op_ctpop_{i32,i64} Richard Henderson
2025-04-15 21:15 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 068/163] tcg: Convert muls2 to TCGOutOpMul2 Richard Henderson
2025-04-15 21:17 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 069/163] tcg: Merge INDEX_op_muls2_{i32,i64} Richard Henderson
2025-04-15 21:17 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 070/163] tcg: Convert mulu2 to TCGOutOpMul2 Richard Henderson
2025-04-15 21:18 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 071/163] tcg: Merge INDEX_op_mulu2_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 072/163] tcg/loongarch64: Support negsetcond Richard Henderson
2025-04-15 21:19 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 073/163] tcg/mips: " Richard Henderson
2025-04-15 21:20 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 074/163] tcg/tci: " Richard Henderson
2025-04-15 21:20 ` Pierrick Bouvier
2025-04-22 15:28 ` Philippe Mathieu-Daudé
2025-04-15 19:23 ` [PATCH v4 075/163] tcg: Remove TCG_TARGET_HAS_negsetcond_{i32,i64} Richard Henderson
2025-04-22 15:35 ` Philippe Mathieu-Daudé
2025-04-15 19:23 ` [PATCH v4 076/163] tcg: Convert setcond, negsetcond to TCGOutOpSetcond Richard Henderson
2025-04-15 21:21 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 077/163] tcg: Merge INDEX_op_{neg}setcond_{i32,i64} Richard Henderson
2025-04-15 21:22 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 078/163] tcg: Convert brcond to TCGOutOpBrcond Richard Henderson
2025-04-15 21:23 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 079/163] tcg: Merge INDEX_op_brcond_{i32,i64} Richard Henderson
2025-04-15 21:24 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 080/163] tcg: Convert movcond to TCGOutOpMovcond Richard Henderson
2025-04-15 21:25 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 081/163] tcg: Merge INDEX_op_movcond_{i32,i64} Richard Henderson
2025-04-15 19:23 ` [PATCH v4 082/163] tcg/ppc: Drop fallback constant loading in tcg_out_cmp Richard Henderson
2025-04-15 21:26 ` Pierrick Bouvier
2025-04-16 14:39 ` Nicholas Piggin
2025-04-16 18:57 ` Richard Henderson
2025-04-15 19:23 ` [PATCH v4 083/163] tcg/arm: Expand arguments to tcg_out_cmp2 Richard Henderson
2025-04-15 21:27 ` Pierrick Bouvier
2025-04-22 15:37 ` Philippe Mathieu-Daudé
2025-04-15 19:23 ` [PATCH v4 084/163] tcg/ppc: " Richard Henderson
2025-04-15 21:27 ` Pierrick Bouvier
2025-04-16 14:43 ` Nicholas Piggin
2025-04-22 15:37 ` Philippe Mathieu-Daudé
2025-04-15 19:23 ` [PATCH v4 085/163] tcg: Convert brcond2_i32 to TCGOutOpBrcond2 Richard Henderson
2025-04-15 21:37 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 086/163] tcg: Convert setcond2_i32 to TCGOutOpSetcond2 Richard Henderson
2025-04-15 21:39 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 087/163] tcg: Convert bswap16 to TCGOutOpBswap Richard Henderson
2025-04-15 21:40 ` Pierrick Bouvier
2025-04-15 19:23 ` [PATCH v4 088/163] tcg: Merge INDEX_op_bswap16_{i32,i64} Richard Henderson
2025-04-15 21:41 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 089/163] tcg: Convert bswap32 to TCGOutOpBswap Richard Henderson
2025-04-15 21:46 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 090/163] tcg: Merge INDEX_op_bswap32_{i32,i64} Richard Henderson
2025-04-15 21:47 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 091/163] tcg: Convert bswap64 to TCGOutOpUnary Richard Henderson
2025-04-15 21:48 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 092/163] tcg: Rename INDEX_op_bswap64_i64 to INDEX_op_bswap64 Richard Henderson
2025-04-15 21:48 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 093/163] tcg: Convert extract to TCGOutOpExtract Richard Henderson
2025-04-15 21:50 ` Pierrick Bouvier
2025-06-09 13:52 ` Andrea Bolognani
2025-06-26 16:20 ` Andrea Bolognani
2025-06-27 13:16 ` Richard Henderson
2025-06-27 14:29 ` Philippe Mathieu-Daudé
2025-06-30 12:08 ` Andrea Bolognani
2025-04-15 19:24 ` [PATCH v4 094/163] tcg: Merge INDEX_op_extract_{i32,i64} Richard Henderson
2025-04-15 21:51 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 095/163] tcg: Convert sextract to TCGOutOpExtract Richard Henderson
2025-04-15 21:55 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 096/163] tcg: Merge INDEX_op_sextract_{i32,i64} Richard Henderson
2025-04-15 21:55 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 097/163] tcg: Convert ext_i32_i64 to TCGOutOpUnary Richard Henderson
2025-04-15 21:55 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 098/163] tcg: Convert extu_i32_i64 " Richard Henderson
2025-04-15 21:56 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 099/163] tcg: Convert extrl_i64_i32 " Richard Henderson
2025-04-15 21:57 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 100/163] tcg: Convert extrh_i64_i32 " Richard Henderson
2025-04-15 21:58 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 101/163] tcg: Convert deposit to TCGOutOpDeposit Richard Henderson
2025-04-15 21:59 ` Pierrick Bouvier
2025-08-28 7:37 ` Michael Tokarev
2025-04-15 19:24 ` [PATCH v4 102/163] tcg/aarch64: Improve deposit Richard Henderson
2025-04-15 22:01 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 103/163] tcg: Merge INDEX_op_deposit_{i32,i64} Richard Henderson
2025-04-15 19:24 ` [PATCH v4 104/163] tcg: Convert extract2 to TCGOutOpExtract2 Richard Henderson
2025-04-15 22:01 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 105/163] tcg: Merge INDEX_op_extract2_{i32,i64} Richard Henderson
2025-04-15 22:02 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 106/163] tcg: Expand fallback add2 with 32-bit operations Richard Henderson
2025-04-15 22:03 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 107/163] tcg: Expand fallback sub2 " Richard Henderson
2025-04-15 22:03 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 108/163] tcg: Do not default add2/sub2_i32 for 32-bit hosts Richard Henderson
2025-04-15 22:04 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 109/163] tcg/mips: Drop support for add2/sub2 Richard Henderson
2025-04-15 22:04 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 110/163] tcg/riscv: " Richard Henderson
2025-04-15 22:05 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 111/163] tcg: Move i into each for loop in liveness_pass_1 Richard Henderson
2025-04-15 22:07 ` Pierrick Bouvier
2025-04-16 6:37 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 112/163] tcg: Sink def, nb_iargs, nb_oargs loads " Richard Henderson
2025-04-15 22:09 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 113/163] tcg: Add add/sub with carry opcodes and infrastructure Richard Henderson
2025-04-16 19:01 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 114/163] tcg: Add TCGOutOp structures for add/sub carry opcodes Richard Henderson
2025-04-16 18:56 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 115/163] tcg/optimize: Handle add/sub with " Richard Henderson
2025-04-16 19:02 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 116/163] tcg/optimize: With two const operands, prefer 0 in arg1 Richard Henderson
2025-04-16 19:03 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 117/163] tcg: Use add carry opcodes to expand add2 Richard Henderson
2025-04-16 18:57 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 118/163] tcg: Use sub carry opcodes to expand sub2 Richard Henderson
2025-04-16 18:57 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 119/163] tcg/i386: Honor carry_live in tcg_out_movi Richard Henderson
2025-04-16 18:57 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 120/163] tcg/i386: Implement add/sub carry opcodes Richard Henderson
2025-04-16 18:58 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 121/163] tcg/i386: Remove support for add2/sub2 Richard Henderson
2025-04-16 18:58 ` Pierrick Bouvier
2025-04-22 16:13 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 122/163] tcg/i386: Special case addci r, 0, 0 Richard Henderson
2025-04-16 18:59 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 123/163] tcg: Add tcg_gen_addcio_{i32,i64,tl} Richard Henderson
2025-04-16 18:59 ` Pierrick Bouvier
2025-04-22 16:13 ` Philippe Mathieu-Daudé
2025-04-22 16:30 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 124/163] target/arm: Use tcg_gen_addcio_* for ADCS Richard Henderson
2025-04-16 19:00 ` Pierrick Bouvier
2025-04-22 16:15 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 125/163] target/hppa: Use tcg_gen_addcio_i64 Richard Henderson
2025-04-16 19:05 ` Pierrick Bouvier
2025-04-22 16:17 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 126/163] target/microblaze: Use tcg_gen_addcio_i32 Richard Henderson
2025-04-16 19:05 ` Pierrick Bouvier
2025-04-22 16:28 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 127/163] target/openrisc: Use tcg_gen_addcio_* for ADDC Richard Henderson
2025-04-16 19:05 ` Pierrick Bouvier
2025-04-22 16:32 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 128/163] target/ppc: Use tcg_gen_addcio_tl for ADD and SUBF Richard Henderson
2025-04-16 14:08 ` Nicholas Piggin
2025-04-16 19:08 ` Pierrick Bouvier
2025-04-22 16:33 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 129/163] target/s390x: Use tcg_gen_addcio_i64 for op_addc64 Richard Henderson
2025-04-16 19:09 ` Pierrick Bouvier
2025-04-22 16:33 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 130/163] target/sh4: Use tcg_gen_addcio_i32 for addc Richard Henderson
2025-04-16 19:09 ` Pierrick Bouvier
2025-04-22 16:34 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 131/163] target/sparc: Use tcg_gen_addcio_tl for gen_op_addcc_int Richard Henderson
2025-04-16 19:09 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 132/163] target/tricore: Use tcg_gen_addcio_i32 for gen_addc_CC Richard Henderson
2025-04-16 19:09 ` Pierrick Bouvier
2025-04-22 16:38 ` Philippe Mathieu-Daudé
2025-04-15 19:24 ` [PATCH v4 133/163] tcg/aarch64: Implement add/sub carry opcodes Richard Henderson
2025-04-16 19:10 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 134/163] tcg/aarch64: Remove support for add2/sub2 Richard Henderson
2025-04-16 19:13 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 135/163] tcg/arm: Implement add/sub carry opcodes Richard Henderson
2025-04-16 19:14 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 136/163] tcg/arm: Remove support for add2/sub2 Richard Henderson
2025-04-16 19:14 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 137/163] tcg/ppc: Implement add/sub carry opcodes Richard Henderson
2025-04-16 19:14 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 138/163] tcg/ppc: Remove support for add2/sub2 Richard Henderson
2025-04-16 19:15 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 139/163] tcg/s390x: Honor carry_live in tcg_out_movi Richard Henderson
2025-04-16 19:15 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 140/163] tcg/s390: Add TCG_CT_CONST_N32 Richard Henderson
2025-04-16 19:16 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 141/163] tcg/s390x: Implement add/sub carry opcodes Richard Henderson
2025-04-16 19:16 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 142/163] tcg/s390x: Use ADD LOGICAL WITH SIGNED IMMEDIATE Richard Henderson
2025-04-16 19:18 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 143/163] tcg/s390x: Remove support for add2/sub2 Richard Henderson
2025-04-16 19:18 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 144/163] tcg/sparc64: Hoist tcg_cond_to_bcond lookup out of tcg_out_movcc Richard Henderson
2025-04-16 6:40 ` Philippe Mathieu-Daudé
2025-04-16 19:19 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 145/163] tcg/sparc64: Implement add/sub carry opcodes Richard Henderson
2025-04-16 19:20 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 146/163] tcg/sparc64: Remove support for add2/sub2 Richard Henderson
2025-04-16 19:20 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 147/163] tcg/tci: Implement add/sub carry opcodes Richard Henderson
2025-04-16 19:36 ` Pierrick Bouvier
2025-04-15 19:24 ` [PATCH v4 148/163] tcg/tci: Remove support for add2/sub2 Richard Henderson
2025-04-16 19:37 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 149/163] tcg: Remove add2/sub2 opcodes Richard Henderson
2025-04-16 19:37 ` Pierrick Bouvier
2025-04-22 16:42 ` Philippe Mathieu-Daudé
2025-04-22 17:10 ` Richard Henderson
2025-04-15 19:25 ` [PATCH v4 150/163] tcg: Formalize tcg_out_mb Richard Henderson
2025-04-16 19:38 ` Pierrick Bouvier
2025-04-22 16:44 ` Philippe Mathieu-Daudé
2025-04-15 19:25 ` [PATCH v4 151/163] tcg: Formalize tcg_out_br Richard Henderson
2025-04-16 19:38 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 152/163] tcg: Formalize tcg_out_goto_ptr Richard Henderson
2025-04-16 20:45 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 153/163] tcg: Assign TCGOP_TYPE in liveness_pass_2 Richard Henderson
2025-04-16 20:46 ` Pierrick Bouvier
2025-04-18 10:46 ` Nicholas Piggin
2025-04-21 16:28 ` Richard Henderson
2025-04-15 19:25 ` [PATCH v4 154/163] tcg: Convert ld to TCGOutOpLoad Richard Henderson
2025-04-16 20:52 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 155/163] tcg: Merge INDEX_op_ld*_{i32,i64} Richard Henderson
2025-04-16 20:53 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 156/163] tcg: Convert st to TCGOutOpStore Richard Henderson
2025-04-16 20:53 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 157/163] tcg: Merge INDEX_op_st*_{i32,i64} Richard Henderson
2025-04-16 7:05 ` Philippe Mathieu-Daudé
2025-04-16 20:53 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 158/163] tcg: Stash MemOp size in TCGOP_FLAGS Richard Henderson
2025-04-16 6:55 ` Philippe Mathieu-Daudé
2025-04-16 20:54 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 159/163] tcg: Remove INDEX_op_qemu_st8_* Richard Henderson
2025-04-16 6:55 ` Philippe Mathieu-Daudé
2025-04-16 19:24 ` Richard Henderson
2025-04-16 20:55 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 160/163] tcg: Merge INDEX_op_{ld,st}_{i32,i64,i128} Richard Henderson
2025-04-16 20:56 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 161/163] tcg: Convert qemu_ld{2} to TCGOutOpLoad{2} Richard Henderson
2025-04-16 20:57 ` Pierrick Bouvier [this message]
2025-04-15 19:25 ` [PATCH v4 162/163] tcg: Convert qemu_st{2} to TCGOutOpLdSt{2} Richard Henderson
2025-04-16 20:58 ` Pierrick Bouvier
2025-04-15 19:25 ` [PATCH v4 163/163] tcg: Remove tcg_out_op Richard Henderson
2025-04-16 19:04 ` Pierrick Bouvier
2025-04-16 13:24 ` [PATCH v4 000/163] tcg: Convert to TCGOutOp structures Nicholas Piggin
2025-04-16 23:38 ` Pierrick Bouvier
2025-04-17 0:18 ` Richard Henderson
2025-04-17 0:49 ` Pierrick Bouvier
2025-04-17 12:02 ` BALATON Zoltan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=acf29586-dde7-42c4-9a7b-60b75bdadaca@linaro.org \
--to=pierrick.bouvier@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=richard.henderson@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).