From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: Song Gao <gaosong@loongson.cn>
Subject: [PATCH 29/35] tcg/optimize: Optimize env memory operations
Date: Mon, 6 Nov 2023 18:48:36 -0800
Message-ID: <20231107024842.7650-30-richard.henderson@linaro.org>
In-Reply-To: <20231107024842.7650-1-richard.henderson@linaro.org>
Propagate known values from env stores to subsequent env loads, and
from one load to later loads of the same slot.  Ranges of env known
to contain a copy of a temp are tracked with an interval tree and
invalidated by overlapping stores, by stores via other base pointers,
and by calls with side effects.
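For example, given a value stored to env and reloaded shortly after
(an illustration only; the field name is hypothetical):

    tcg_gen_st_i32(val, tcg_env, offsetof(CPUArchState, some_reg));
    /* ... nothing clobbers that range of env in between ... */
    tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUArchState, some_reg));

the load can now be folded to mov_i32 tmp,val.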
Reviewed-by: Song Gao <gaosong@loongson.cn>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
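Note: MemCopyInfo nodes are allocated with tcg_malloc(), whose memory
is only reclaimed at the end of the translation, so retired nodes are
recycled through ctx->mem_free rather than allocated anew.  Below is
a minimal sketch of the overlap query pattern used throughout the
patch (not part of the patch itself; variable names are illustrative):

    IntervalTreeNode *n;

    /* Visit every recorded copy overlapping [ofs, last]. */
    for (n = interval_tree_iter_first(&ctx->mem_copy, ofs, last);
         n != NULL;
         n = interval_tree_iter_next(n, ofs, last)) {
        MemCopyInfo *mc = container_of(n, MemCopyInfo, itree);
        /* mc->itree.start and mc->itree.last bound this copy. */
    }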
tcg/optimize.c | 264 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 243 insertions(+), 21 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 118561f56d..b32ef0be0f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -25,6 +25,7 @@
#include "qemu/osdep.h"
#include "qemu/int128.h"
+#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
@@ -37,10 +38,18 @@
glue(glue(case INDEX_op_, x), _i64): \
glue(glue(case INDEX_op_, x), _vec)
+typedef struct MemCopyInfo {
+ IntervalTreeNode itree;
+ QSIMPLEQ_ENTRY(MemCopyInfo) next;
+ TCGTemp *ts;
+ TCGType type;
+} MemCopyInfo;
+
typedef struct TempOptInfo {
bool is_const;
TCGTemp *prev_copy;
TCGTemp *next_copy;
+ QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
uint64_t val;
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
@@ -51,6 +60,9 @@ typedef struct OptContext {
TCGOp *prev_mb;
TCGTempSet temps_used;
+ IntervalTreeRoot mem_copy;
+ QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
+
/* In flight values from optimization. */
uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
@@ -127,27 +139,6 @@ static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
return a->kind < b->kind ? b : a;
}
-/* Reset TEMP's state, possibly removing the temp for the list of copies. */
-static void reset_ts(OptContext *ctx, TCGTemp *ts)
-{
- TempOptInfo *ti = ts_info(ts);
- TempOptInfo *pi = ts_info(ti->prev_copy);
- TempOptInfo *ni = ts_info(ti->next_copy);
-
- ni->prev_copy = ti->prev_copy;
- pi->next_copy = ti->next_copy;
- ti->next_copy = ts;
- ti->prev_copy = ts;
- ti->is_const = false;
- ti->z_mask = -1;
- ti->s_mask = 0;
-}
-
-static void reset_temp(OptContext *ctx, TCGArg arg)
-{
- reset_ts(ctx, arg_temp(arg));
-}
-
/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
@@ -167,6 +158,7 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
ti->next_copy = ts;
ti->prev_copy = ts;
+ QSIMPLEQ_INIT(&ti->mem_copy);
if (ts->kind == TEMP_CONST) {
ti->is_const = true;
ti->val = ts->val;
@@ -179,6 +171,45 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
}
}
+static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
+{
+ IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
+ return r ? container_of(r, MemCopyInfo, itree) : NULL;
+}
+
+static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
+{
+ IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
+ return r ? container_of(r, MemCopyInfo, itree) : NULL;
+}
+
+static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
+{
+ TCGTemp *ts = mc->ts;
+ TempOptInfo *ti = ts_info(ts);
+
+ interval_tree_remove(&mc->itree, &ctx->mem_copy);
+ QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
+ QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
+}
+
+static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
+{
+ while (true) {
+ MemCopyInfo *mc = mem_copy_first(ctx, s, l);
+ if (!mc) {
+ break;
+ }
+ remove_mem_copy(ctx, mc);
+ }
+}
+
+static void remove_mem_copy_all(OptContext *ctx)
+{
+ remove_mem_copy_in(ctx, 0, -1);
+ tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
+}
+
static TCGTemp *find_better_copy(TCGTemp *ts)
{
TCGTemp *i, *ret;
@@ -195,6 +226,80 @@ static TCGTemp *find_better_copy(TCGTemp *ts)
return ret;
}
+static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
+{
+ TempOptInfo *si = ts_info(src_ts);
+ TempOptInfo *di = ts_info(dst_ts);
+ MemCopyInfo *mc;
+
+ QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
+ tcg_debug_assert(mc->ts == src_ts);
+ mc->ts = dst_ts;
+ }
+ QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
+}
+
+/* Reset TEMP's state, possibly removing the temp from the list of copies. */
+static void reset_ts(OptContext *ctx, TCGTemp *ts)
+{
+ TempOptInfo *ti = ts_info(ts);
+ TCGTemp *pts = ti->prev_copy;
+ TCGTemp *nts = ti->next_copy;
+ TempOptInfo *pi = ts_info(pts);
+ TempOptInfo *ni = ts_info(nts);
+
+ ni->prev_copy = ti->prev_copy;
+ pi->next_copy = ti->next_copy;
+ ti->next_copy = ts;
+ ti->prev_copy = ts;
+ ti->is_const = false;
+ ti->z_mask = -1;
+ ti->s_mask = 0;
+
+ if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
+ if (ts == nts) {
+ /* Last temp copy being removed, the mem copies die. */
+ MemCopyInfo *mc;
+ QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
+ interval_tree_remove(&mc->itree, &ctx->mem_copy);
+ }
+ QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
+ } else {
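+ /* Other copies of ts remain: move the mem copies to the best one. */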
+ move_mem_copies(find_better_copy(nts), ts);
+ }
+ }
+}
+
+static void reset_temp(OptContext *ctx, TCGArg arg)
+{
+ reset_ts(ctx, arg_temp(arg));
+}
+
+static void record_mem_copy(OptContext *ctx, TCGType type,
+ TCGTemp *ts, intptr_t start, intptr_t last)
+{
+ MemCopyInfo *mc;
+ TempOptInfo *ti;
+
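+ /* Reuse a retired node if possible: tcg_malloc memory is only reclaimed at the end of the translation. */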
+ mc = QSIMPLEQ_FIRST(&ctx->mem_free);
+ if (mc) {
+ QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
+ } else {
+ mc = tcg_malloc(sizeof(*mc));
+ }
+
+ memset(mc, 0, sizeof(*mc));
+ mc->itree.start = start;
+ mc->itree.last = last;
+ mc->type = type;
+ interval_tree_insert(&mc->itree, &ctx->mem_copy);
+
+ ts = find_better_copy(ts);
+ ti = ts_info(ts);
+ mc->ts = ts;
+ QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
+}
+
static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
TCGTemp *i;
@@ -221,6 +326,18 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
+static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
+{
+ MemCopyInfo *mc;
+
+ for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
+ if (mc->itree.start == s && mc->type == type) {
+ return find_better_copy(mc->ts);
+ }
+ }
+ return NULL;
+}
+
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
@@ -270,6 +387,11 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
si->next_copy = dst_ts;
di->is_const = si->is_const;
di->val = si->val;
+
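+ /* Keep the mem copy list attached to the best copy of the set. */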
+ if (!QSIMPLEQ_EMPTY(&si->mem_copy)
+ && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
+ move_mem_copies(dst_ts, src_ts);
+ }
}
return true;
}
@@ -688,6 +810,7 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
ctx->prev_mb = NULL;
if (!(def->flags & TCG_OPF_COND_BRANCH)) {
memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ remove_mem_copy_all(ctx);
}
return;
}
@@ -1213,6 +1336,11 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
}
}
+ /* If the function has side effects, reset mem data. */
+ if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
+ remove_mem_copy_all(ctx);
+ }
+
/* Reset temp data for outputs. */
for (i = 0; i < nb_oargs; i++) {
reset_temp(ctx, op->args[i]);
@@ -2070,6 +2198,83 @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
+{
+ TCGTemp *dst, *src;
+ intptr_t ofs;
+ TCGType type;
+
+ if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
+ return false;
+ }
+
+ type = ctx->type;
+ ofs = op->args[2];
+ dst = arg_temp(op->args[0]);
+ src = find_mem_copy_for(ctx, type, ofs);
+ if (src && src->base_type == type) {
+ return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
+ }
+
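+ /* Not found: record this load, so that later loads from the same slot can be forwarded from dst. */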
+ reset_ts(ctx, dst);
+ record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
+ return true;
+}
+
+static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
+{
+ intptr_t ofs = op->args[2];
+ intptr_t lm1;
+
+ if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
+ remove_mem_copy_all(ctx);
+ return false;
+ }
+
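+ /* Determine the length of the store, minus 1, from the opcode. */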
+ switch (op->opc) {
+ CASE_OP_32_64(st8):
+ lm1 = 0;
+ break;
+ CASE_OP_32_64(st16):
+ lm1 = 1;
+ break;
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i32:
+ lm1 = 3;
+ break;
+ case INDEX_op_st_i64:
+ lm1 = 7;
+ break;
+ case INDEX_op_st_vec:
+ lm1 = tcg_type_size(ctx->type) - 1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ remove_mem_copy_in(ctx, ofs, ofs + lm1);
+ return false;
+}
+
+static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
+{
+ TCGTemp *src;
+ intptr_t ofs, last;
+ TCGType type;
+
+ if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
+ fold_tcg_st(ctx, op);
+ return false;
+ }
+
+ src = arg_temp(op->args[0]);
+ ofs = op->args[2];
+ type = ctx->type;
+ last = ofs + tcg_type_size(type) - 1;
+ remove_mem_copy_in(ctx, ofs, last);
+ record_mem_copy(ctx, type, src, ofs, last);
+ return false;
+}
+
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
if (fold_const2_commutative(ctx, op) ||
@@ -2093,6 +2298,8 @@ void tcg_optimize(TCGContext *s)
TCGOp *op, *op_next;
OptContext ctx = { .tcg = s };
+ QSIMPLEQ_INIT(&ctx.mem_free);
+
/* Array VALS has an element for each temp.
If this temp holds a constant then its value is kept in VALS' element.
If this temp is a copy of other ones then the other copies are
@@ -2214,6 +2421,21 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_ld32u_i64:
done = fold_tcg_ld(&ctx, op);
break;
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld_i64:
+ case INDEX_op_ld_vec:
+ done = fold_tcg_ld_memcopy(&ctx, op);
+ break;
+ CASE_OP_32_64(st8):
+ CASE_OP_32_64(st16):
+ case INDEX_op_st32_i64:
+ done = fold_tcg_st(&ctx, op);
+ break;
+ case INDEX_op_st_i32:
+ case INDEX_op_st_i64:
+ case INDEX_op_st_vec:
+ done = fold_tcg_st_memcopy(&ctx, op);
+ break;
case INDEX_op_mb:
done = fold_mb(&ctx, op);
break;
--
2.34.1