From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [PULL 17/39] tcg/aarch64: Support TCG_COND_TST{EQ,NE}
Date: Mon, 5 Feb 2024 07:40:30 +1000
Message-ID: <20240204214052.5639-18-richard.henderson@linaro.org>
In-Reply-To: <20240204214052.5639-1-richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-con-set.h | 5 +--
tcg/aarch64/tcg-target-con-str.h | 1 +
tcg/aarch64/tcg-target.h | 2 +-
tcg/aarch64/tcg-target.c.inc | 56 ++++++++++++++++++++++----------
4 files changed, 44 insertions(+), 20 deletions(-)
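
For context, a rough standalone sketch of what the new 'C'
(TCG_CT_CONST_CMP) constraint amounts to, mirroring the
tcg_target_const_match() change below: for TSTEQ/TSTNE the constant must
be a logical immediate, because the comparison lowers to ANDS (TST); for
the other conditions it must be a possibly-negated arithmetic immediate,
because it lowers to SUBS (CMP) or ADDS (CMN).  The aimm_ok() and
limm_ok() helpers here are simplified approximations of QEMU's
is_aimm()/is_limm() (the real is_limm() also accepts bit patterns that
wrap around the word), and the Cond enum is invented for the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { COND_EQ, COND_NE, COND_TSTEQ, COND_TSTNE } Cond;

static bool cond_is_tst(Cond c)
{
    return c == COND_TSTEQ || c == COND_TSTNE;
}

/* Arithmetic immediate: 12 bits, optionally shifted left by 12. */
static bool aimm_ok(uint64_t v)
{
    return (v & ~(uint64_t)0xfff) == 0 || (v & ~(uint64_t)0xfff000) == 0;
}

/* Simplified logical immediate: one contiguous run of ones, no wrap. */
static bool limm_ok(uint64_t v)
{
    return v != 0 && (((v & -v) + v) & v) == 0;
}

/* Is 'v' usable as the constant operand of a compare with condition 'c'? */
static bool cmp_const_ok(Cond c, uint64_t v)
{
    if (cond_is_tst(c)) {
        return limm_ok(v);              /* lowers to ANDS (TST) */
    }
    return aimm_ok(v) || aimm_ok(-v);   /* lowers to SUBS (CMP) or ADDS (CMN) */
}

int main(void)
{
    printf("%d\n", cmp_const_ok(COND_TSTNE, 0xff00));   /* 1: run of ones */
    printf("%d\n", cmp_const_ok(COND_TSTNE, 0x1001));   /* 0: rejected    */
    printf("%d\n", cmp_const_ok(COND_NE, 0xfff));       /* 1: fits aimm   */
    printf("%d\n", cmp_const_ok(COND_NE, 0x123456));    /* 0: too wide    */
    return 0;
}
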
diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
index 3fdee26a3d..44fcc1206e 100644
--- a/tcg/aarch64/tcg-target-con-set.h
+++ b/tcg/aarch64/tcg-target-con-set.h
@@ -10,7 +10,7 @@
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
-C_O0_I2(r, rA)
+C_O0_I2(r, rC)
C_O0_I2(rZ, r)
C_O0_I2(w, r)
C_O0_I3(rZ, rZ, r)
@@ -22,6 +22,7 @@ C_O1_I2(r, 0, rZ)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rA)
C_O1_I2(r, r, rAL)
+C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rL)
C_O1_I2(r, rZ, rZ)
@@ -31,6 +32,6 @@ C_O1_I2(w, w, wN)
C_O1_I2(w, w, wO)
C_O1_I2(w, w, wZ)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, r, rA, rZ, rZ)
+C_O1_I4(r, r, rC, rZ, rZ)
C_O2_I1(r, r, r)
C_O2_I4(r, r, rZ, rZ, rA, rMZ)
diff --git a/tcg/aarch64/tcg-target-con-str.h b/tcg/aarch64/tcg-target-con-str.h
index fb1a845b4f..48e1722c68 100644
--- a/tcg/aarch64/tcg-target-con-str.h
+++ b/tcg/aarch64/tcg-target-con-str.h
@@ -16,6 +16,7 @@ REGS('w', ALL_VECTOR_REGS)
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('A', TCG_CT_CONST_AIMM)
+CONST('C', TCG_CT_CONST_CMP)
CONST('L', TCG_CT_CONST_LIMM)
CONST('M', TCG_CT_CONST_MONE)
CONST('O', TCG_CT_CONST_ORRI)
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index b4ac13be7b..ef5ebe91bd 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -138,7 +138,7 @@ typedef enum {
#define TCG_TARGET_HAS_qemu_ldst_i128 1
#endif
-#define TCG_TARGET_HAS_tst 0
+#define TCG_TARGET_HAS_tst 1
#define TCG_TARGET_HAS_v64 1
#define TCG_TARGET_HAS_v128 1
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 420e4a35ea..70df250c04 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -126,6 +126,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_MONE 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000
+#define TCG_CT_CONST_CMP 0x4000
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
@@ -279,6 +280,15 @@ static bool tcg_target_const_match(int64_t val, int ct,
if (type == TCG_TYPE_I32) {
val = (int32_t)val;
}
+
+ if (ct & TCG_CT_CONST_CMP) {
+ if (is_tst_cond(cond)) {
+ ct |= TCG_CT_CONST_LIMM;
+ } else {
+ ct |= TCG_CT_CONST_AIMM;
+ }
+ }
+
if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
return 1;
}
@@ -345,6 +355,9 @@ static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
[TCG_COND_GTU] = COND_HI,
[TCG_COND_GEU] = COND_HS,
[TCG_COND_LEU] = COND_LS,
+ /* bit test */
+ [TCG_COND_TSTEQ] = COND_EQ,
+ [TCG_COND_TSTNE] = COND_NE,
};
typedef enum {
@@ -1342,19 +1355,26 @@ static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
tcg_out_bfm(s, ext, rd, rn, a, b);
}
-static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
+static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a,
tcg_target_long b, bool const_b)
{
- if (const_b) {
- /* Using CMP or CMN aliases. */
- if (b >= 0) {
- tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
+ if (is_tst_cond(cond)) {
+ if (!const_b) {
+ tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
} else {
- tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
+ tcg_debug_assert(is_limm(b));
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, a, b);
}
} else {
- /* Using CMP alias SUBS wzr, Wn, Wm */
- tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
+ if (!const_b) {
+ tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
+ } else if (b >= 0) {
+ tcg_debug_assert(is_aimm(b));
+ tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
+ } else {
+ tcg_debug_assert(is_aimm(-b));
+ tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
+ }
}
}
@@ -1402,7 +1422,7 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
need_cmp = false;
} else {
need_cmp = true;
- tcg_out_cmp(s, ext, a, b, b_const);
+ tcg_out_cmp(s, ext, c, a, b, b_const);
}
if (!l->has_value) {
@@ -1575,7 +1595,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
} else {
AArch64Insn sel = I3506_CSEL;
- tcg_out_cmp(s, ext, a0, 0, 1);
+ tcg_out_cmp(s, ext, TCG_COND_NE, a0, 0, 1);
tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1);
if (const_b) {
@@ -1720,7 +1740,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
addr_adj, compare_mask);
/* Perform the address comparison. */
- tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0);
+ tcg_out_cmp(s, addr_type, TCG_COND_NE, TCG_REG_TMP0, TCG_REG_TMP2, 0);
/* If not equal, we jump to the slow path. */
ldst->label_ptr[0] = s->code_ptr;
@@ -2276,7 +2296,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
a2 = (int32_t)a2;
/* FALLTHRU */
case INDEX_op_setcond_i64:
- tcg_out_cmp(s, ext, a1, a2, c2);
+ tcg_out_cmp(s, ext, args[3], a1, a2, c2);
/* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
TCG_REG_XZR, tcg_invert_cond(args[3]));
@@ -2286,7 +2306,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
a2 = (int32_t)a2;
/* FALLTHRU */
case INDEX_op_negsetcond_i64:
- tcg_out_cmp(s, ext, a1, a2, c2);
+ tcg_out_cmp(s, ext, args[3], a1, a2, c2);
/* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR,
TCG_REG_XZR, tcg_invert_cond(args[3]));
@@ -2296,7 +2316,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
a2 = (int32_t)a2;
/* FALLTHRU */
case INDEX_op_movcond_i64:
- tcg_out_cmp(s, ext, a1, a2, c2);
+ tcg_out_cmp(s, ext, args[5], a1, a2, c2);
tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
break;
@@ -2896,11 +2916,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_add_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
+ return C_O1_I2(r, r, rA);
+
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
case INDEX_op_negsetcond_i32:
case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, r, rA);
+ return C_O1_I2(r, r, rC);
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
@@ -2950,11 +2972,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
- return C_O0_I2(r, rA);
+ return C_O0_I2(r, rC);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rA, rZ, rZ);
+ return C_O1_I4(r, r, rC, rZ, rZ);
case INDEX_op_qemu_ld_a32_i32:
case INDEX_op_qemu_ld_a64_i32:
--
2.34.1
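
For anyone new to the conditions themselves: TSTEQ/TSTNE are simply an
AND folded into the comparison, which is what allows this backend to
emit a single flag-setting instruction for them.  A minimal plain-C
illustration follows (nothing here is QEMU API; the helper names are
invented for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* TCG_COND_TSTEQ: true when the operands share no set bits, (a & b) == 0. */
static bool tst_eq(uint64_t a, uint64_t b) { return (a & b) == 0; }

/* TCG_COND_TSTNE: true when the operands share any set bit, (a & b) != 0. */
static bool tst_ne(uint64_t a, uint64_t b) { return (a & b) != 0; }

int main(void)
{
    uint64_t flags = 0x0a;

    /*
     * Without backend support this kind of test is an AND into a
     * temporary followed by a compare/branch against zero.  With
     * TCG_TARGET_HAS_tst enabled here, the aarch64 backend can emit
     * it as one flag-setting instruction, e.g. "tst x0, #2" (ANDS
     * with XZR as the destination) plus the conditional branch.
     */
    printf("%d\n", tst_ne(flags, 1u << 1));   /* 1: bit 1 is set   */
    printf("%d\n", tst_eq(flags, 1u << 0));   /* 1: bit 0 is clear */
    return 0;
}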