From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: cota@braap.org
Subject: [Qemu-devel] [PATCH v6 17/50] tcg: Introduce index_arg
Date: Mon, 16 Oct 2017 10:25:36 -0700
Message-ID: <20171016172609.23422-18-richard.henderson@linaro.org>
In-Reply-To: <20171016172609.23422-1-richard.henderson@linaro.org>
For multi-threaded TCG we have one TCGContext per thread.  With that,
plus the static cpu_* variables, the translators need to handle indices.
We transform those into "arguments" at opcode generation time.
For now, that transformation is a no-op.
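To illustrate, here is a minimal sketch of what a translator sees after this
patch (the opcode, helper names and the dst/src temporaries below are only an
example built from the existing tcg-op.h interface, not new additions):

    /* A translator emitting a move still writes, e.g.: */
    tcg_gen_op2_i32(INDEX_op_mov_i32, dst, src);

    /* Internally this now expands to
     *     tcg_gen_op2(INDEX_op_mov_i32, tcgv_i32_arg(dst), tcgv_i32_arg(src));
     * instead of calling GET_TCGV_I32 directly.  Because index_arg() is the
     * identity function for now, the TCGArg values placed in the opcode
     * stream are unchanged; the point is to funnel every index-to-argument
     * conversion through a single helper whose representation can change
     * later without touching the translators again.  */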
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op.h | 92 ++++++++++++++++++++++++++++++------------------------------
tcg/tcg.h | 20 +++++++++++++
tcg/tcg-op.c | 14 ++++-----
tcg/tcg.c | 26 ++++++++---------
4 files changed, 86 insertions(+), 66 deletions(-)
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index de9a61206a..ca1a3becb9 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -37,12 +37,12 @@ void tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
{
- tcg_gen_op1(opc, GET_TCGV_I32(a1));
+ tcg_gen_op1(opc, tcgv_i32_arg(a1));
}
static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
{
- tcg_gen_op1(opc, GET_TCGV_I64(a1));
+ tcg_gen_op1(opc, tcgv_i64_arg(a1));
}
static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
@@ -52,22 +52,22 @@ static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
{
- tcg_gen_op2(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2));
+ tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
}
static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
{
- tcg_gen_op2(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2));
+ tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
}
static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 a1, TCGArg a2)
{
- tcg_gen_op2(opc, GET_TCGV_I32(a1), a2);
+ tcg_gen_op2(opc, tcgv_i32_arg(a1), a2);
}
static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 a1, TCGArg a2)
{
- tcg_gen_op2(opc, GET_TCGV_I64(a1), a2);
+ tcg_gen_op2(opc, tcgv_i64_arg(a1), a2);
}
static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2)
@@ -78,167 +78,167 @@ static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2)
static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
TCGv_i32 a2, TCGv_i32 a3)
{
- tcg_gen_op3(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), GET_TCGV_I32(a3));
+ tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3));
}
static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
TCGv_i64 a2, TCGv_i64 a3)
{
- tcg_gen_op3(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), GET_TCGV_I64(a3));
+ tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3));
}
static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
TCGv_i32 a2, TCGArg a3)
{
- tcg_gen_op3(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), a3);
+ tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
}
static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
TCGv_i64 a2, TCGArg a3)
{
- tcg_gen_op3(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), a3);
+ tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
}
static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
TCGv_ptr base, TCGArg offset)
{
- tcg_gen_op3(opc, GET_TCGV_I32(val), GET_TCGV_PTR(base), offset);
+ tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
}
static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
TCGv_ptr base, TCGArg offset)
{
- tcg_gen_op3(opc, GET_TCGV_I64(val), GET_TCGV_PTR(base), offset);
+ tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
}
static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4)
{
- tcg_gen_op4(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
- GET_TCGV_I32(a3), GET_TCGV_I32(a4));
+ tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4));
}
static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4)
{
- tcg_gen_op4(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
- GET_TCGV_I64(a3), GET_TCGV_I64(a4));
+ tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4));
}
static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGArg a4)
{
- tcg_gen_op4(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), GET_TCGV_I32(a3), a4);
+ tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3), a4);
}
static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGArg a4)
{
- tcg_gen_op4(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), GET_TCGV_I64(a3), a4);
+ tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3), a4);
}
static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGArg a3, TCGArg a4)
{
- tcg_gen_op4(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2), a3, a4);
+ tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
}
static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGArg a3, TCGArg a4)
{
- tcg_gen_op4(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2), a3, a4);
+ tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
}
static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
{
- tcg_gen_op5(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
- GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5));
+ tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
}
static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
{
- tcg_gen_op5(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
- GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5));
+ tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
}
static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4, TCGArg a5)
{
- tcg_gen_op5(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
- GET_TCGV_I32(a3), GET_TCGV_I32(a4), a5);
+ tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5);
}
static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4, TCGArg a5)
{
- tcg_gen_op5(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
- GET_TCGV_I64(a3), GET_TCGV_I64(a4), a5);
+ tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5);
}
static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGArg a4, TCGArg a5)
{
- tcg_gen_op5(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
- GET_TCGV_I32(a3), a4, a5);
+ tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), a4, a5);
}
static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGArg a4, TCGArg a5)
{
- tcg_gen_op5(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
- GET_TCGV_I64(a3), a4, a5);
+ tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcgv_i64_arg(a3), a4, a5);
}
static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4,
TCGv_i32 a5, TCGv_i32 a6)
{
- tcg_gen_op6(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
- GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5),
- GET_TCGV_I32(a6));
+ tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
+ tcgv_i32_arg(a6));
}
static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4,
TCGv_i64 a5, TCGv_i64 a6)
{
- tcg_gen_op6(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
- GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5),
- GET_TCGV_I64(a6));
+ tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
+ tcgv_i64_arg(a6));
}
static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4,
TCGv_i32 a5, TCGArg a6)
{
- tcg_gen_op6(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
- GET_TCGV_I32(a3), GET_TCGV_I32(a4), GET_TCGV_I32(a5), a6);
+ tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
}
static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4,
TCGv_i64 a5, TCGArg a6)
{
- tcg_gen_op6(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
- GET_TCGV_I64(a3), GET_TCGV_I64(a4), GET_TCGV_I64(a5), a6);
+ tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
}
static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
TCGv_i32 a3, TCGv_i32 a4,
TCGArg a5, TCGArg a6)
{
- tcg_gen_op6(opc, GET_TCGV_I32(a1), GET_TCGV_I32(a2),
- GET_TCGV_I32(a3), GET_TCGV_I32(a4), a5, a6);
+ tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
}
static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
TCGv_i64 a3, TCGv_i64 a4,
TCGArg a5, TCGArg a6)
{
- tcg_gen_op6(opc, GET_TCGV_I64(a1), GET_TCGV_I64(a2),
- GET_TCGV_I64(a3), GET_TCGV_I64(a4), a5, a6);
+ tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5, a6);
}
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 0d61932301..b8ede7fe5c 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -756,6 +756,26 @@ static inline size_t arg_index(TCGArg a)
return a;
}
+static inline TCGArg index_arg(size_t n)
+{
+ return n;
+}
+
+static inline TCGArg tcgv_i32_arg(TCGv_i32 t)
+{
+ return index_arg(GET_TCGV_I32(t));
+}
+
+static inline TCGArg tcgv_i64_arg(TCGv_i64 t)
+{
+ return index_arg(GET_TCGV_I64(t));
+}
+
+static inline TCGArg tcgv_ptr_arg(TCGv_ptr t)
+{
+ return index_arg(GET_TCGV_PTR(t));
+}
+
static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
tcg_ctx.gen_op_buf[op_idx].args[arg] = v;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index bff4b95097..be4b623e82 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -2458,7 +2458,7 @@ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
tcg_gen_mov_i32(ret, TCGV_LOW(arg));
} else if (TCG_TARGET_HAS_extrl_i64_i32) {
tcg_gen_op2(INDEX_op_extrl_i64_i32,
- GET_TCGV_I32(ret), GET_TCGV_I64(arg));
+ tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else {
tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
}
@@ -2470,7 +2470,7 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
} else if (TCG_TARGET_HAS_extrh_i64_i32) {
tcg_gen_op2(INDEX_op_extrh_i64_i32,
- GET_TCGV_I32(ret), GET_TCGV_I64(arg));
+ tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else {
TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, arg, 32);
@@ -2486,7 +2486,7 @@ void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
} else {
tcg_gen_op2(INDEX_op_extu_i32_i64,
- GET_TCGV_I64(ret), GET_TCGV_I32(arg));
+ tcgv_i64_arg(ret), tcgv_i32_arg(arg));
}
}
@@ -2497,7 +2497,7 @@ void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
} else {
tcg_gen_op2(INDEX_op_ext_i32_i64,
- GET_TCGV_I64(ret), GET_TCGV_I32(arg));
+ tcgv_i64_arg(ret), tcgv_i32_arg(arg));
}
}
@@ -2563,7 +2563,7 @@ void tcg_gen_lookup_and_goto_ptr(void)
if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
TCGv_ptr ptr = tcg_temp_new_ptr();
gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env);
- tcg_gen_op1i(INDEX_op_goto_ptr, GET_TCGV_PTR(ptr));
+ tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
} else {
tcg_gen_exit_tb(0);
@@ -2608,7 +2608,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
} else {
- tcg_gen_op3(opc, GET_TCGV_I32(val), GET_TCGV_I64(addr), oi);
+ tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi);
}
#endif
}
@@ -2621,7 +2621,7 @@ static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
} else {
- tcg_gen_op3(opc, GET_TCGV_I64(val), GET_TCGV_I32(addr), oi);
+ tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi);
}
#else
if (TCG_TARGET_REG_BITS == 32) {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 113700ccc1..129aecca60 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1054,25 +1054,25 @@ void tcg_gen_callN(void *func, TCGArg ret, int nargs, TCGArg *args)
two return temporaries, and reassemble below. */
retl = tcg_temp_new_i64();
reth = tcg_temp_new_i64();
- op->args[pi++] = GET_TCGV_I64(reth);
- op->args[pi++] = GET_TCGV_I64(retl);
+ op->args[pi++] = tcgv_i64_arg(reth);
+ op->args[pi++] = tcgv_i64_arg(retl);
nb_rets = 2;
} else {
- op->args[pi++] = ret;
+ op->args[pi++] = index_arg(ret);
nb_rets = 1;
}
#else
if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
- op->args[pi++] = ret + 1;
- op->args[pi++] = ret;
+ op->args[pi++] = index_arg(ret + 1);
+ op->args[pi++] = index_arg(ret);
#else
- op->args[pi++] = ret;
- op->args[pi++] = ret + 1;
+ op->args[pi++] = index_arg(ret);
+ op->args[pi++] = index_arg(ret + 1);
#endif
nb_rets = 2;
} else {
- op->args[pi++] = ret;
+ op->args[pi++] = index_arg(ret);
nb_rets = 1;
}
#endif
@@ -1103,17 +1103,17 @@ void tcg_gen_callN(void *func, TCGArg ret, int nargs, TCGArg *args)
have to get more complicated to differentiate between
stack arguments and register arguments. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
- op->args[pi++] = args[i] + 1;
- op->args[pi++] = args[i];
+ op->args[pi++] = index_arg(args[i] + 1);
+ op->args[pi++] = index_arg(args[i]);
#else
- op->args[pi++] = args[i];
- op->args[pi++] = args[i] + 1;
+ op->args[pi++] = index_arg(args[i]);
+ op->args[pi++] = index_arg(args[i] + 1);
#endif
real_args += 2;
continue;
}
- op->args[pi++] = args[i];
+ op->args[pi++] = index_arg(args[i]);
real_args++;
}
op->args[pi++] = (uintptr_t)func;
--
2.13.6