From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org, qemu-s390x@nongnu.org,
qemu-riscv@nongnu.org, qemu-ppc@nongnu.org
Subject: [PATCH 35/42] tcg/ppc: Reorg tcg_out_tlb_read
Date: Fri, 7 Apr 2023 19:43:07 -0700
Message-ID: <20230408024314.3357414-37-richard.henderson@linaro.org>
In-Reply-To: <20230408024314.3357414-1-richard.henderson@linaro.org>
Allocate TCG_REG_TMP2. Use R0, TMP1, TMP2 instead of any of
the normally allocated registers for the tlb load.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
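Not part of the patch proper, but for reference: a minimal standalone C
sketch of the register choice, assuming the SysV/ELFv2 convention in which
r3-r10 carry integer arguments. The names and values below mirror the
context shown in the diff (TMP1 = r12 in the #else branch, TMP2 = r11);
the point is that the TLB load now touches only dedicated volatile
temporaries that are disjoint from the registers needed to marshal
slow-path helper arguments.

/*
 * Illustrative sketch only, not QEMU code.  Encodes the calling-convention
 * fact that motivates reserving r11 as TCG_REG_TMP2: R0, TMP1 and TMP2 all
 * lie outside the r3-r10 argument range used by the slow-path helpers.
 */
#include <assert.h>
#include <stdio.h>

enum { R0 = 0, R3 = 3, R10 = 10, R11 = 11, R12 = 12 };

#define TCG_REG_TMP1 R12   /* as in the #else branch of the context above */
#define TCG_REG_TMP2 R11   /* newly allocated by this patch */

static int is_helper_arg_reg(int r)
{
    return r >= R3 && r <= R10;   /* SysV/ELFv2 integer argument registers */
}

int main(void)
{
    const int tlb_temps[] = { R0, TCG_REG_TMP1, TCG_REG_TMP2 };
    unsigned i;

    for (i = 0; i < sizeof(tlb_temps) / sizeof(tlb_temps[0]); i++) {
        assert(!is_helper_arg_reg(tlb_temps[i]));
    }
    printf("TLB temporaries stay clear of the helper argument registers\n");
    return 0;
}

The sketch compiles with any C compiler; the assertion only records the
ABI fact, it does not model the TLB lookup itself.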
tcg/ppc/tcg-target.c.inc | 83 +++++++++++++++++++++++-----------------
1 file changed, 48 insertions(+), 35 deletions(-)
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 383464b408..7195c0b817 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -68,6 +68,7 @@
#else
# define TCG_REG_TMP1 TCG_REG_R12
#endif
+#define TCG_REG_TMP2 TCG_REG_R11
#define TCG_VEC_TMP1 TCG_REG_V0
#define TCG_VEC_TMP2 TCG_REG_V1
@@ -2007,10 +2008,11 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
-/* Perform the TLB load and compare. Places the result of the comparison
- in CR7, loads the addend of the TLB into R3, and returns the register
- containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
-
+/*
+ * Perform the TLB load and compare. Places the result of the comparison
+ * in CR7, loads the addend of the TLB into TMP1, and returns the register
+ * containing the guest address (zero-extended into TMP2). Clobbers R0.
+ */
static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
TCGReg addrlo, TCGReg addrhi,
int mem_index, bool is_read)
@@ -2026,40 +2028,44 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
unsigned a_bits = get_alignment_bits(opc);
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
+ tcg_out_shri32(s, TCG_REG_R0, addrlo,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
} else {
- tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
+ tcg_out_shri64(s, TCG_REG_R0, addrlo,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
}
- tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
+ tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
- /* Load the TLB comparator. */
+ /* Load the (low part) TLB comparator into TMP2. */
if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
? LWZUX : LDUX);
- tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
+ tcg_out32(s, lxu | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
} else {
- tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
+ tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2,
+ TCG_REG_TMP1, cmp_off + 4);
} else {
- tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
+ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
}
}
- /* Load the TLB addend for use on the fast path. Do this asap
- to minimize any load use delay. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
- offsetof(CPUTLBEntry, addend));
+ /*
+ * Load the TLB addend for use on the fast path.
+ * Do this asap to minimize any load use delay.
+ */
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+ offsetof(CPUTLBEntry, addend));
+ }
- /* Clear the non-page, non-alignment bits from the address */
+ /* Clear the non-page, non-alignment bits from the address into R0. */
if (TCG_TARGET_REG_BITS == 32) {
/* We don't support unaligned accesses on 32-bits.
* Preserve the bottom bits and thus trigger a comparison
@@ -2090,9 +2096,6 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
if (TARGET_LONG_BITS == 32) {
tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
(32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
- /* Zero-extend the address for use in the final address. */
- tcg_out_ext32u(s, TCG_REG_R4, addrlo);
- addrlo = TCG_REG_R4;
} else if (a_bits == 0) {
tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
} else {
@@ -2102,16 +2105,27 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
}
}
+ /* Full or low part comparison into cr7. */
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 7, TCG_TYPE_I32);
+
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
- 0, 7, TCG_TYPE_I32);
- tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
+ /* High part comparison into cr6. */
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_REG_TMP1, cmp_off);
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, addrhi, 0, 6, TCG_TYPE_I32);
+
+ /* Load addend, deferred for this case. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+ offsetof(CPUTLBEntry, addend));
+
+ /* Combine comparisons into cr7. */
tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
- } else {
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
- 0, 7, TCG_TYPE_TL);
}
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ /* Zero-extend the address for use in the final address. */
+ tcg_out_ext32u(s, TCG_REG_TMP2, addrlo);
+ return TCG_REG_TMP2;
+ }
return addrlo;
}
@@ -2179,11 +2193,9 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
/*
* For the purposes of ppc32 sorting 4 input registers into 4 argument
* registers, there is an outside chance we would require 3 temps.
- * Because of constraints, no inputs are in r3, and env will not be
- * placed into r3 until after the sorting is done, and is thus free.
*/
tcg_out_st_helper_args(s, lb, tcg_out_mflr, -1, TCG_REG_TMP1,
- TCG_REG_R0, TCG_REG_R3);
+ TCG_REG_TMP2, TCG_REG_R0);
tcg_out_call_int(s, LK, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
@@ -2285,7 +2297,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType d_type)
label_ptr = s->code_ptr;
tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
- rbase = TCG_REG_R3;
+ rbase = TCG_REG_TMP1;
#else /* !CONFIG_SOFTMMU */
a_bits = get_alignment_bits(opc);
if (a_bits) {
@@ -2366,7 +2378,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType d_type)
label_ptr = s->code_ptr;
tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
- rbase = TCG_REG_R3;
+ rbase = TCG_REG_TMP1;
#else /* !CONFIG_SOFTMMU */
a_bits = get_alignment_bits(opc);
if (a_bits) {
@@ -3934,7 +3946,8 @@ static void tcg_target_init(TCGContext *s)
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
#endif
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
if (USE_REG_TB) {
--
2.34.1