From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PATCH for-8.0 v3 12/45] tcg: Allocate TCGTemp pairs in host memory order
Date: Fri, 11 Nov 2022 17:40:28 +1000
Message-ID: <20221111074101.2069454-13-richard.henderson@linaro.org>
In-Reply-To: <20221111074101.2069454-1-richard.henderson@linaro.org>
Allocate the first of a pair at the lower address, and the
second of a pair at the higher address. This will make it
easier to find the beginning of the larger memory block.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
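[Editorial note, not part of the patch: the following is a minimal standalone
sketch in plain C illustrating the convention this patch adopts. The names
(pair, low, high) are made up for the example and are not QEMU API. Subindex 0
of a split 64-bit value always sits at the lower address and subindex 1 at the
higher one, so the low/high halves are selected purely by host endianness,
which is what the new TCGV_LOW/TCGV_HIGH definitions do with HOST_BIG_ENDIAN.]

    #include <stdint.h>
    #include <string.h>

    #define HOST_BIG_ENDIAN 0   /* assume a little-endian host for this sketch */

    int main(void)
    {
        uint64_t val = 0x1122334455667788ull;
        uint32_t pair[2];

        /* Subindex 0 occupies the lower address, subindex 1 the higher one,
         * i.e. the pair mirrors the host memory order of the 64-bit value. */
        memcpy(pair, &val, sizeof(pair));

        uint32_t low  = pair[HOST_BIG_ENDIAN];   /* low half of the value  */
        uint32_t high = pair[!HOST_BIG_ENDIAN];  /* high half of the value */

        /* returns 0 when the endian-based selection is correct */
        return !(low == (uint32_t)val && high == (uint32_t)(val >> 32));
    }
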
tcg/tcg-internal.h | 4 ++--
tcg/tcg.c | 58 ++++++++++++++++++++++------------------------
2 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index a9ea27f67a..2c06b5116a 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -62,11 +62,11 @@ static inline unsigned tcg_call_flags(TCGOp *op)
#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
- return temp_tcgv_i32(tcgv_i64_temp(t));
+ return temp_tcgv_i32(tcgv_i64_temp(t) + HOST_BIG_ENDIAN);
}
static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
- return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
+ return temp_tcgv_i32(tcgv_i64_temp(t) + !HOST_BIG_ENDIAN);
}
#else
extern TCGv_i32 TCGV_LOW(TCGv_i64) QEMU_ERROR("32-bit code path is reachable");
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 583677a1c4..f9315d00fc 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -886,10 +886,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
TCGContext *s = tcg_ctx;
TCGTemp *base_ts = tcgv_ptr_temp(base);
TCGTemp *ts = tcg_global_alloc(s);
- int indirect_reg = 0, bigendian = 0;
-#if HOST_BIG_ENDIAN
- bigendian = 1;
-#endif
+ int indirect_reg = 0;
switch (base_ts->kind) {
case TEMP_FIXED:
@@ -915,7 +912,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
ts->indirect_reg = indirect_reg;
ts->mem_allocated = 1;
ts->mem_base = base_ts;
- ts->mem_offset = offset + bigendian * 4;
+ ts->mem_offset = offset;
pstrcpy(buf, sizeof(buf), name);
pstrcat(buf, sizeof(buf), "_0");
ts->name = strdup(buf);
@@ -926,7 +923,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
ts2->indirect_reg = indirect_reg;
ts2->mem_allocated = 1;
ts2->mem_base = base_ts;
- ts2->mem_offset = offset + (1 - bigendian) * 4;
+ ts2->mem_offset = offset + 4;
ts2->temp_subindex = 1;
pstrcpy(buf, sizeof(buf), name);
pstrcat(buf, sizeof(buf), "_1");
@@ -1072,37 +1069,43 @@ TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
ts = g_hash_table_lookup(h, &val);
if (ts == NULL) {
+ int64_t *val_ptr;
+
ts = tcg_temp_alloc(s);
if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
TCGTemp *ts2 = tcg_temp_alloc(s);
+ tcg_debug_assert(ts2 == ts + 1);
+
ts->base_type = TCG_TYPE_I64;
ts->type = TCG_TYPE_I32;
ts->kind = TEMP_CONST;
ts->temp_allocated = 1;
- /*
- * Retain the full value of the 64-bit constant in the low
- * part, so that the hash table works. Actual uses will
- * truncate the value to the low part.
- */
- ts->val = val;
- tcg_debug_assert(ts2 == ts + 1);
ts2->base_type = TCG_TYPE_I64;
ts2->type = TCG_TYPE_I32;
ts2->kind = TEMP_CONST;
ts2->temp_allocated = 1;
ts2->temp_subindex = 1;
- ts2->val = val >> 32;
+
+ /*
+ * Retain the full value of the 64-bit constant in the low
+ * part, so that the hash table works. Actual uses will
+ * truncate the value to the low part.
+ */
+ ts[HOST_BIG_ENDIAN].val = val;
+ ts[!HOST_BIG_ENDIAN].val = val >> 32;
+ val_ptr = &ts[HOST_BIG_ENDIAN].val;
} else {
ts->base_type = type;
ts->type = type;
ts->kind = TEMP_CONST;
ts->temp_allocated = 1;
ts->val = val;
+ val_ptr = &ts->val;
}
- g_hash_table_insert(h, &ts->val, ts);
+ g_hash_table_insert(h, val_ptr, ts);
}
return ts;
@@ -1514,13 +1517,8 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
pi = 0;
if (ret != NULL) {
if (TCG_TARGET_REG_BITS < 64 && (typemask & 6) == dh_typecode_i64) {
-#if HOST_BIG_ENDIAN
- op->args[pi++] = temp_arg(ret + 1);
- op->args[pi++] = temp_arg(ret);
-#else
op->args[pi++] = temp_arg(ret);
op->args[pi++] = temp_arg(ret + 1);
-#endif
nb_rets = 2;
} else {
op->args[pi++] = temp_arg(ret);
@@ -1554,8 +1552,8 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
}
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
- op->args[pi++] = temp_arg(args[i] + HOST_BIG_ENDIAN);
- op->args[pi++] = temp_arg(args[i] + !HOST_BIG_ENDIAN);
+ op->args[pi++] = temp_arg(args[i]);
+ op->args[pi++] = temp_arg(args[i] + 1);
real_args += 2;
continue;
}
@@ -4116,14 +4114,14 @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
}
/* If the two inputs form one 64-bit value, try dupm_vec. */
- if (itsl + 1 == itsh && itsl->base_type == TCG_TYPE_I64) {
- temp_sync(s, itsl, s->reserved_regs, 0, 0);
- temp_sync(s, itsh, s->reserved_regs, 0, 0);
-#if HOST_BIG_ENDIAN
- TCGTemp *its = itsh;
-#else
- TCGTemp *its = itsl;
-#endif
+ if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
+ itsh->temp_subindex == !HOST_BIG_ENDIAN &&
+ itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
+ TCGTemp *its = itsl - HOST_BIG_ENDIAN;
+
+ temp_sync(s, its + 0, s->reserved_regs, 0, 0);
+ temp_sync(s, its + 1, s->reserved_regs, 0, 0);
+
if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
its->mem_base->reg, its->mem_offset)) {
goto done;
--
2.34.1