From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: Luis Pires <luis.pires@eldorado.org.br>
Subject: [PULL v2 39/60] tcg/optimize: Split out fold_to_not
Date: Thu, 28 Oct 2021 21:33:08 -0700
Message-ID: <20211029043329.1518029-40-richard.henderson@linaro.org>
In-Reply-To: <20211029043329.1518029-1-richard.henderson@linaro.org>
Split out the conditional conversion from a more complex logical
operation to a simple NOT. Create a couple more helpers to make
this easy for the outermost logical operations.
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
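[Editorial note, not part of the patch: the identities these helpers
recognize, with x the non-constant operand, are xor(x, -1), nand(x, -1),
nor(x, 0) and eqv(x, 0), where the constant sits in the second operand
(fold_xi_to_not), and andc(-1, x) and orc(0, x), where it sits in the
first (fold_ix_to_not). All six reduce to NOT x. A standalone C sketch
checking them:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x123456789abcdef0ull;

        /* Constant in the second operand: fold_xi_to_not. */
        assert((x ^ -1ull) == ~x);     /* xor(x, -1)  -> not x */
        assert(~(x & -1ull) == ~x);    /* nand(x, -1) -> not x */
        assert(~(x | 0) == ~x);        /* nor(x, 0)   -> not x */
        assert(~(x ^ 0) == ~x);        /* eqv(x, 0)   -> not x */

        /* Constant in the first operand: fold_ix_to_not. */
        assert((-1ull & ~x) == ~x);    /* andc(-1, x) -> not x */
        assert((0 | ~x) == ~x);        /* orc(0, x)   -> not x */
        return 0;
    }
]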
tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
1 file changed, 86 insertions(+), 72 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e869fa7e78..21f4251b4f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -694,6 +694,52 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+    TCGOpcode not_op;
+    bool have_not;
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        not_op = INDEX_op_not_i32;
+        have_not = TCG_TARGET_HAS_not_i32;
+        break;
+    case TCG_TYPE_I64:
+        not_op = INDEX_op_not_i64;
+        have_not = TCG_TARGET_HAS_not_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        not_op = INDEX_op_not_vec;
+        have_not = TCG_TARGET_HAS_not_vec;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_not) {
+        op->opc = not_op;
+        op->args[1] = op->args[idx];
+        return fold_not(ctx, op);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT. */
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return fold_to_not(ctx, op, 2);
+    }
+    return false;
+}
+
 /* If the binary operation has second argument @i, fold to @i. */
 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -703,6 +749,15 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
     return false;
 }
 
+/* If the binary operation has second argument @i, fold to NOT. */
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return fold_to_not(ctx, op, 1);
+    }
+    return false;
+}
+
 /* If the binary operation has both arguments equal, fold to @i. */
 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -781,7 +836,8 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -987,7 +1043,11 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
@@ -1134,7 +1194,11 @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, -1)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -1144,12 +1208,22 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    /* Because of fold_to_not, we want to always return true, via finish. */
+    finish_folding(ctx, op);
+    return true;
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
@@ -1163,7 +1237,11 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_ix_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
@@ -1299,7 +1377,8 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -1458,71 +1537,6 @@ void tcg_optimize(TCGContext *s)
                 }
             }
             break;
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64(nand):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == -1) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64(nor):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(andc):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == -1) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(orc):
-        CASE_OP_32_64(eqv):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == 0) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        try_not:
-            {
-                TCGOpcode not_op;
-                bool have_not;
-
-                switch (ctx.type) {
-                case TCG_TYPE_I32:
-                    not_op = INDEX_op_not_i32;
-                    have_not = TCG_TARGET_HAS_not_i32;
-                    break;
-                case TCG_TYPE_I64:
-                    not_op = INDEX_op_not_i64;
-                    have_not = TCG_TARGET_HAS_not_i64;
-                    break;
-                case TCG_TYPE_V64:
-                case TCG_TYPE_V128:
-                case TCG_TYPE_V256:
-                    not_op = INDEX_op_not_vec;
-                    have_not = TCG_TARGET_HAS_not_vec;
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-                if (!have_not) {
-                    break;
-                }
-                op->opc = not_op;
-                reset_temp(op->args[0]);
-                op->args[1] = op->args[i];
-                continue;
-            }
         default:
             break;
         }
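
[Editorial note, not part of the patch: the reason fold_not always
returns true is the tail call from fold_to_not above. Each fold_*
helper returns true once the op has been fully handled; after
fold_to_not has rewritten the opcode in place, letting fold_not return
false would send the already-rewritten op back through the original
caller's generic path. A minimal C model of that contract, using
hypothetical stand-in names rather than the real QEMU types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the types in tcg/optimize.c. */
    typedef enum { OP_ANDC, OP_NOT } Opc;
    typedef struct { Opc opc; } Op;

    static void finish_folding_model(Op *op)
    {
        printf("finish: opc=%d\n", op->opc);  /* generic post-processing */
    }

    static bool fold_not_model(Op *op)
    {
        /* Even with nothing left to simplify, claim the op as handled:
         * it may just have been rewritten from another opcode. */
        finish_folding_model(op);
        return true;
    }

    static bool fold_to_not_model(Op *op)
    {
        op->opc = OP_NOT;             /* rewrite in place */
        return fold_not_model(op);    /* propagate "handled" */
    }

    static bool fold_andc_model(Op *op)
    {
        if (fold_to_not_model(op)) {  /* e.g. andc(-1, x) -> not x */
            return true;
        }
        return false;                 /* generic andc handling would follow */
    }

    int main(void)
    {
        Op op = { OP_ANDC };
        return fold_andc_model(&op) ? 0 : 1;
    }
]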
--
2.25.1