From: Richard Henderson <rth@twiddle.net>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
Aurelien Jarno <aurelien@aurel32.net>
Subject: [Qemu-devel] [PATCH v3 10/20] tcg-arm: Use TCG_REG_TMP name for the tcg temporary
Date: Thu, 28 Mar 2013 08:32:51 -0700 [thread overview]
Message-ID: <1364484781-15561-11-git-send-email-rth@twiddle.net> (raw)
In-Reply-To: <1364484781-15561-1-git-send-email-rth@twiddle.net>
Don't hard-code R8.  Introduce a TCG_REG_TMP define for the scratch
register so the choice of temporary is made in exactly one place and
can be changed later without touching every use site.
Signed-off-by: Richard Henderson <rth@twiddle.net>
---
tcg/arm/tcg-target.c | 125 ++++++++++++++++++++++++++-------------------------
1 file changed, 64 insertions(+), 61 deletions(-)
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index e599794..90440fb 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -113,6 +113,8 @@ static const int tcg_target_call_oarg_regs[2] = {
TCG_REG_R0, TCG_REG_R1
};
+#define TCG_REG_TMP TCG_REG_R8
+
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
*(uint32_t *) code_ptr = target;
@@ -550,10 +552,10 @@ static inline void tcg_out_mul32(TCGContext *s,
tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
(rm << 8) | 0x90 | rs);
else {
- tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
+ tcg_out32(s, (cond << 28) | (TCG_REG_TMP << 16) | (0 << 12) |
(rs << 8) | 0x90 | rm);
tcg_out_dat_reg(s, cond, ARITH_MOV,
- rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
+ rd, 0, TCG_REG_TMP, SHIFT_IMM_LSL(0));
}
}
@@ -568,8 +570,8 @@ static inline void tcg_out_umull32(TCGContext *s,
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
else {
tcg_out_dat_reg(s, cond, ARITH_MOV,
- TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
- tcg_out32(s, (cond << 28) | 0x800098 |
+ TCG_REG_TMP, 0, rm, SHIFT_IMM_LSL(0));
+ tcg_out32(s, (cond << 28) | 0x800090 | TCG_REG_TMP |
(rd1 << 16) | (rd0 << 12) | (rs << 8));
}
}
@@ -585,8 +587,8 @@ static inline void tcg_out_smull32(TCGContext *s,
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
else {
tcg_out_dat_reg(s, cond, ARITH_MOV,
- TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
- tcg_out32(s, (cond << 28) | 0xc00098 |
+ TCG_REG_TMP, 0, rm, SHIFT_IMM_LSL(0));
+ tcg_out32(s, (cond << 28) | 0xc00090 | TCG_REG_TMP |
(rd1 << 16) | (rd0 << 12) | (rs << 8));
}
}
@@ -656,11 +658,11 @@ static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
} else {
tcg_out_dat_reg(s, cond, ARITH_MOV,
- TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
+ TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
tcg_out_dat_reg(s, cond, ARITH_MOV,
- TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
+ TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
tcg_out_dat_reg(s, cond, ARITH_ORR,
- rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
+ rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
}
}
@@ -671,11 +673,11 @@ static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
} else {
tcg_out_dat_reg(s, cond, ARITH_MOV,
- TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
+ TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
tcg_out_dat_reg(s, cond, ARITH_MOV,
- TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
+ TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
tcg_out_dat_reg(s, cond, ARITH_ORR,
- rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
+ rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
}
}
@@ -688,10 +690,10 @@ static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
} else {
tcg_out_dat_reg(s, cond, ARITH_MOV,
- TCG_REG_R8, 0, rn, SHIFT_IMM_LSR(8));
- tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R8, TCG_REG_R8, 0xff);
+ TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
+ tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
tcg_out_dat_reg(s, cond, ARITH_ORR,
- rd, TCG_REG_R8, rn, SHIFT_IMM_LSL(8));
+ rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
}
}
@@ -702,13 +704,13 @@ static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
} else {
tcg_out_dat_reg(s, cond, ARITH_EOR,
- TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
+ TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
tcg_out_dat_imm(s, cond, ARITH_BIC,
- TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
+ TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
tcg_out_dat_reg(s, cond, ARITH_MOV,
rd, 0, rn, SHIFT_IMM_ROR(8));
tcg_out_dat_reg(s, cond, ARITH_EOR,
- rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
+ rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
}
}
@@ -732,8 +734,8 @@ static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
/* bfi becomes bfc with rn == 15. */
a1 = 15;
} else {
- tcg_out_movi32(s, cond, TCG_REG_R8, a1);
- a1 = TCG_REG_R8;
+ tcg_out_movi32(s, cond, TCG_REG_TMP, a1);
+ a1 = TCG_REG_TMP;
}
}
/* bfi/bfc */
@@ -928,8 +930,8 @@ static inline void tcg_out_ld32u(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_ld32_12(s, cond, rd, rn, offset);
}
@@ -938,8 +940,8 @@ static inline void tcg_out_st32(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_st32_12(s, cond, rd, rn, offset);
}
@@ -948,8 +950,8 @@ static inline void tcg_out_ld16u(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_ld16u_8(s, cond, rd, rn, offset);
}
@@ -958,8 +960,8 @@ static inline void tcg_out_ld16s(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_ld16s_8(s, cond, rd, rn, offset);
}
@@ -968,8 +970,8 @@ static inline void tcg_out_st16(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_st16_8(s, cond, rd, rn, offset);
}
@@ -978,8 +980,8 @@ static inline void tcg_out_ld8u(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_ld8_12(s, cond, rd, rn, offset);
}
@@ -988,8 +990,8 @@ static inline void tcg_out_ld8s(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_ld8s_8(s, cond, rd, rn, offset);
}
@@ -998,8 +1000,8 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
int rd, int rn, int32_t offset)
{
if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_R8, offset);
- tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
+ tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
} else
tcg_out_st8_12(s, cond, rd, rn, offset);
}
@@ -1027,10 +1029,10 @@ static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
tcg_out32(s, addr);
} else {
- tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
+ tcg_out_movi32(s, cond, TCG_REG_TMP, val - 8);
tcg_out_dat_reg(s, cond, ARITH_ADD,
TCG_REG_PC, TCG_REG_PC,
- TCG_REG_R8, SHIFT_IMM_LSL(0));
+ TCG_REG_TMP, SHIFT_IMM_LSL(0));
}
}
}
@@ -1131,12 +1133,13 @@ static const void * const qemu_st_helpers[4] = {
if (argreg < 4) { \
TCG_OUT_ARG_GET_ARG(argreg); \
} else if (argreg == 4) { \
- TCG_OUT_ARG_GET_ARG(TCG_REG_R8); \
- tcg_out32(s, (COND_AL << 28) | 0x052d8010); \
+ TCG_OUT_ARG_GET_ARG(TCG_REG_TMP); \
+ tcg_out32(s, (COND_AL << 28) | 0x052d0010 | (TCG_REG_TMP << 12)); \
} else { \
assert(argreg < 8); \
- TCG_OUT_ARG_GET_ARG(TCG_REG_R8); \
- tcg_out32(s, (COND_AL << 28) | 0x058d8000 | (argreg - 4) * 4); \
+ TCG_OUT_ARG_GET_ARG(TCG_REG_TMP); \
+ tcg_out_st32_12(s, COND_AL, TCG_REG_TMP, TCG_REG_CALL_STACK, \
+ (argreg - 4) * 4); \
} \
return argreg + 1; \
}
@@ -1234,10 +1237,10 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
# if CPU_TLB_BITS > 8
# error
# endif
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
tcg_out_dat_imm(s, COND_AL, ARITH_AND,
- TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
+ TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
/* We assume that the offset is contained within 20 bits. */
@@ -1250,7 +1253,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
}
tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
- TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
/* Check alignment. */
if (s_bits)
tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
@@ -1355,9 +1358,9 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
i = ctz32(offset) & ~1;
rot = ((32 - i) << 7) & 0xf00;
- tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg,
((offset >> i) & 0xff) | rot);
- addr_reg = TCG_REG_R8;
+ addr_reg = TCG_REG_TMP;
offset &= ~(0xff << i);
}
}
@@ -1444,9 +1447,9 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
* add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
*/
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
- TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+ TCG_REG_TMP, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
tcg_out_dat_imm(s, COND_AL, ARITH_AND,
- TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
+ TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
/* We assume that the offset is contained within 20 bits. */
@@ -1459,7 +1462,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
}
tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
- TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
/* Check alignment. */
if (s_bits)
tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
@@ -1737,7 +1740,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
a0 = args[0], a1 = args[1], a2 = args[2];
a3 = args[3], a4 = args[4], a5 = args[5];
if (a0 == a3 || (a0 == a5 && !const_args[5])) {
- a0 = TCG_REG_R8;
+ a0 = TCG_REG_TMP;
}
tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
a0, a2, a4, const_args[4]);
@@ -1749,7 +1752,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
a0 = args[0], a1 = args[1], a2 = args[2];
a3 = args[3], a4 = args[4], a5 = args[5];
if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
- a0 = TCG_REG_R8;
+ a0 = TCG_REG_TMP;
}
if (const_args[2]) {
if (const_args[4]) {
@@ -1817,9 +1820,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
SHIFT_IMM_LSL(0));
} else {
- tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[1], 0x20);
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[1], 0x20);
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
- SHIFT_REG_ROR(TCG_REG_R8));
+ SHIFT_REG_ROR(TCG_REG_TMP));
}
break;
@@ -1924,15 +1927,15 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
break;
case INDEX_op_rem_i32:
- tcg_out_sdiv(s, COND_AL, TCG_REG_R8, args[1], args[2]);
- tcg_out_mul32(s, COND_AL, TCG_REG_R8, TCG_REG_R8, args[2]);
- tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_R8,
+ tcg_out_sdiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
+ tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
+ tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
SHIFT_IMM_LSL(0));
break;
case INDEX_op_remu_i32:
- tcg_out_udiv(s, COND_AL, TCG_REG_R8, args[1], args[2]);
- tcg_out_mul32(s, COND_AL, TCG_REG_R8, TCG_REG_R8, args[2]);
- tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_R8,
+ tcg_out_udiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
+ tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
+ tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
SHIFT_IMM_LSL(0));
break;
@@ -2051,7 +2054,7 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
tcg_add_target_add_op_defs(arm_op_defs);
--
1.8.1.4
next prev parent reply other threads:[~2013-03-28 15:33 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-03-28 15:32 [Qemu-devel] [PATCH v3 00/20] tcg-arm improvements Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 01/20] tcg-arm: Use bic to implement and with constant Richard Henderson
2013-03-29 16:53 ` Aurelien Jarno
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 02/20] tcg-arm: Handle negated constant arguments to and/sub Richard Henderson
2013-03-29 16:53 ` Aurelien Jarno
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 03/20] tcg-arm: Allow constant first argument to sub Richard Henderson
2013-03-29 16:58 ` Aurelien Jarno
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 04/20] tcg-arm: Use tcg_out_dat_rIN for compares Richard Henderson
2013-03-29 16:58 ` Aurelien Jarno
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 05/20] tcg-arm: Handle constant arguments to add2/sub2 Richard Henderson
2013-03-28 15:56 ` Peter Maydell
2013-03-28 16:04 ` Richard Henderson
2013-03-28 16:09 ` Laurent Desnogues
2013-03-28 16:16 ` Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 06/20] tcg-arm: Improve constant generation Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 07/20] tcg-arm: Fold epilogue into INDEX_op_exit_tb Richard Henderson
2013-03-28 16:05 ` Peter Maydell
2013-03-28 16:12 ` Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 08/20] tcg-arm: Implement deposit for armv7 Richard Henderson
2013-03-28 16:15 ` Peter Maydell
2013-03-28 16:22 ` Richard Henderson
2013-03-28 16:59 ` Peter Maydell
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 09/20] tcg-arm: Implement division instructions Richard Henderson
2013-03-28 15:32 ` Richard Henderson [this message]
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 11/20] tcg-arm: Use R12 for the tcg temporary Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 12/20] tcg-arm: Cleanup multiply subroutines Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 13/20] tcg-arm: Cleanup tcg_out_goto_label Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 14/20] tcg-arm: Cleanup goto_tb handling Richard Henderson
2013-03-28 20:09 ` Aurelien Jarno
2013-03-28 20:48 ` Richard Henderson
2013-03-29 6:50 ` Aurelien Jarno
2013-03-29 15:06 ` Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 15/20] tcg-arm: Cleanup most primitive load store subroutines Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 16/20] tcg-arm: Fix local stack frame Richard Henderson
2013-03-29 16:50 ` Aurelien Jarno
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 17/20] tcg-arm: Split out tcg_out_tlb_read Richard Henderson
2013-03-28 15:32 ` [Qemu-devel] [PATCH v3 18/20] tcg-arm: Improve scheduling of tcg_out_tlb_read Richard Henderson
2013-03-28 15:33 ` [Qemu-devel] [PATCH v3 19/20] tcg-arm: Use movi32 + blx for calls on v7 Richard Henderson
2013-03-28 15:33 ` [Qemu-devel] [PATCH v3 20/20] tcg-arm: Convert to CONFIG_QEMU_LDST_OPTIMIZATION Richard Henderson
2013-03-28 16:44 ` Peter Maydell
2013-03-28 17:46 ` Richard Henderson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1364484781-15561-11-git-send-email-rth@twiddle.net \
--to=rth@twiddle.net \
--cc=aurelien@aurel32.net \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).