From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PATCH 01/20] tcg/optimize: Fix folding of vector ops
Date: Sat, 18 Dec 2021 11:42:31 -0800
Message-ID: <20211218194250.247633-2-richard.henderson@linaro.org>
In-Reply-To: <20211218194250.247633-1-richard.henderson@linaro.org>
Bitwise operations are easy to fold, because the operation is
identical regardless of element size. But add and sub need
extra element-size information that is not currently propagated.
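
To make this concrete, here is a small standalone C sketch (an
illustration only, not part of the patch; the constants and lane
split are chosen arbitrarily) showing why folding a vector add as a
single 64-bit operation is wrong for 32-bit elements, while a bitwise
op folds correctly at any width:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Two 32-bit lanes packed into one 64-bit constant: {0xffffffff, 0}. */
    uint64_t x = 0x00000000ffffffffull;
    uint64_t y = 0x0000000000000001ull;   /* lanes {1, 0} */

    /* Bitwise ops never cross bit positions, so folding the whole
     * 64-bit value gives the per-lane answer for free. */
    uint64_t and64 = x & y;

    /* A single 64-bit add lets the carry cross the lane boundary... */
    uint64_t add64 = x + y;

    /* ...whereas the correct 32-bit-element vector add is {0, 0}. */
    uint64_t add_vec = ((uint64_t)(uint32_t)((x >> 32) + (y >> 32)) << 32)
                     | (uint32_t)(x + y);

    printf("and64   = %016" PRIx64 "\n", and64);    /* 0000000000000001 */
    printf("add64   = %016" PRIx64 "\n", add64);    /* 0000000100000000 */
    printf("add_vec = %016" PRIx64 "\n", add_vec);  /* 0000000000000000 */
    return 0;
}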
Fixes: 2f9f08ba43d
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 49 ++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 38 insertions(+), 11 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2397f2cf93..e573000951 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -308,13 +308,13 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
CASE_OP_32_64(mul):
return x * y;
- CASE_OP_32_64(and):
+ CASE_OP_32_64_VEC(and):
return x & y;
- CASE_OP_32_64(or):
+ CASE_OP_32_64_VEC(or):
return x | y;
- CASE_OP_32_64(xor):
+ CASE_OP_32_64_VEC(xor):
return x ^ y;
case INDEX_op_shl_i32:
@@ -347,16 +347,16 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
case INDEX_op_rotl_i64:
return rol64(x, y & 63);
- CASE_OP_32_64(not):
+ CASE_OP_32_64_VEC(not):
return ~x;
CASE_OP_32_64(neg):
return -x;
- CASE_OP_32_64(andc):
+ CASE_OP_32_64_VEC(andc):
return x & ~y;
- CASE_OP_32_64(orc):
+ CASE_OP_32_64_VEC(orc):
return x | ~y;
CASE_OP_32_64(eqv):
@@ -751,6 +751,12 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_commutative(OptContext *ctx, TCGOp *op)
+{
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+ return false;
+}
+
static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
swap_commutative(op->args[0], &op->args[1], &op->args[2]);
@@ -905,6 +911,16 @@ static bool fold_add(OptContext *ctx, TCGOp *op)
return false;
}
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_add_vec(OptContext *ctx, TCGOp *op)
+{
+ if (fold_commutative(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0)) {
+ return true;
+ }
+ return false;
+}
+
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
@@ -1938,10 +1954,10 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_sub(OptContext *ctx, TCGOp *op)
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, 0) ||
+ if (fold_xx_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0) ||
fold_sub_to_neg(ctx, op)) {
return true;
@@ -1949,6 +1965,11 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
+}
+
static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
return fold_addsub2(ctx, op, false);
@@ -2052,9 +2073,12 @@ void tcg_optimize(TCGContext *s)
* Sorted alphabetically by opcode as much as possible.
*/
switch (opc) {
- CASE_OP_32_64_VEC(add):
+ CASE_OP_32_64(add):
done = fold_add(&ctx, op);
break;
+ case INDEX_op_add_vec:
+ done = fold_add_vec(&ctx, op);
+ break;
CASE_OP_32_64(add2):
done = fold_add2(&ctx, op);
break;
@@ -2193,9 +2217,12 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(sextract):
done = fold_sextract(&ctx, op);
break;
- CASE_OP_32_64_VEC(sub):
+ CASE_OP_32_64(sub):
done = fold_sub(&ctx, op);
break;
+ case INDEX_op_sub_vec:
+ done = fold_sub_vec(&ctx, op);
+ break;
CASE_OP_32_64(sub2):
done = fold_sub2(&ctx, op);
break;
--
2.25.1
Thread overview: 51+ messages
2021-12-18 19:42 [PATCH 00/20] tcg: vector improvements Richard Henderson
2021-12-18 19:42 ` Richard Henderson [this message]
2021-12-19 11:37 ` [PATCH 01/20] tcg/optimize: Fix folding of vector ops Philippe Mathieu-Daudé
2021-12-18 19:42 ` [PATCH 02/20] tcg: Add opcodes for vector nand, nor, eqv Richard Henderson
2021-12-19 11:28 ` Philippe Mathieu-Daudé
2022-02-01 18:28 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 03/20] tcg/ppc: Implement vector NAND, NOR, EQV Richard Henderson
2021-12-19 0:15 ` Philippe Mathieu-Daudé
2022-02-01 18:29 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 04/20] tcg/s390x: " Richard Henderson
2021-12-19 0:17 ` Philippe Mathieu-Daudé
2022-02-01 18:29 ` Alex Bennée
2022-02-01 18:31 ` Alex Bennée
2024-01-03 13:21 ` Philippe Mathieu-Daudé
2024-01-03 21:58 ` Richard Henderson
2021-12-18 19:42 ` [PATCH 05/20] tcg/i386: Detect AVX512 Richard Henderson
2022-02-01 18:41 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 06/20] tcg/i386: Add tcg_out_evex_opc Richard Henderson
2022-02-01 19:20 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 07/20] tcg/i386: Use tcg_can_emit_vec_op in expand_vec_cmp_noinv Richard Henderson
2022-02-01 19:21 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 08/20] tcg/i386: Implement avx512 variable shifts Richard Henderson
2022-02-01 20:33 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 09/20] tcg/i386: Implement avx512 scalar shift Richard Henderson
2022-02-02 13:48 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 10/20] tcg/i386: Implement avx512 immediate sari shift Richard Henderson
2022-02-02 14:02 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 11/20] tcg/i386: Implement avx512 immediate rotate Richard Henderson
2022-02-02 14:05 ` Alex Bennée
2022-02-03 1:26 ` Richard Henderson
2021-12-18 19:42 ` [PATCH 12/20] tcg/i386: Implement avx512 variable rotate Richard Henderson
2022-02-02 14:14 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 13/20] tcg/i386: Support avx512vbmi2 vector shift-double instructions Richard Henderson
2022-02-02 14:28 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 14/20] tcg/i386: Expand vector word rotate as avx512vbmi2 shift-double Richard Henderson
2022-02-03 10:32 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 15/20] tcg/i386: Remove rotls_vec from tcg_target_op_def Richard Henderson
2022-02-03 10:34 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 16/20] tcg/i386: Expand scalar rotate with avx512 insns Richard Henderson
2022-02-03 10:38 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 17/20] tcg/i386: Implement avx512 min/max/abs Richard Henderson
2022-02-03 10:44 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 18/20] tcg/i386: Implement avx512 multiply Richard Henderson
2022-02-03 10:45 ` Alex Bennée
2021-12-18 19:42 ` [PATCH 19/20] tcg/i386: Implement more logical operations for avx512 Richard Henderson
2022-02-03 10:46 ` Alex Bennée
2022-02-03 21:54 ` Richard Henderson
2021-12-18 19:42 ` [PATCH 20/20] tcg/i386: Implement bitsel " Richard Henderson
2022-02-03 10:51 ` Alex Bennée
2022-01-29 9:28 ` [PATCH 00/20] tcg: vector improvements Richard Henderson
2022-02-03 10:25 ` Alex Bennée