* [Qemu-devel] [PATCH] alpha: convert cmov and bcond to TCG
@ 2008-09-17 13:01 Tristan Gingold
2008-09-17 15:01 ` [Qemu-devel] [PATCH v2] " Tristan Gingold
2008-09-17 22:06 ` [Qemu-devel] [PATCH] " Aurelien Jarno
0 siblings, 2 replies; 3+ messages in thread
From: Tristan Gingold @ 2008-09-17 13:01 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 183 bytes --]
Hi,
this patch converts cmov and integer conditional branches to TCG.
This was tested with the testsuite.
Tristan.
Signed-off-by: Tristan Gingold <gingold@adacore.com>
Tristan.
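A note on the approach before the attached diff: the new gen_cmov() is handed the INVERSE of the architectural condition and branches over the register write, which is why CMOVEQ (write rc when ra == 0) is generated with TCG_COND_NE; the extra mask argument selects the low-bit tests used by CMOVLBS/CMOVLBC and BLBS/BLBC. gen_bcond() uses the same trick with two labels to pick which constant is stored into cpu_pc. As a rough model only (plain C, illustrative, not code from the patch), the guest-level semantics being reproduced are:

#include <inttypes.h>
#include <stdio.h>

/* Rough model of the guest-level behaviour the TCG code reproduces;
 * illustrative only, not taken from QEMU. */

/* CMOVEQ ra, rb, rc: copy rb into rc when ra is zero.  The translation
 * branches with the inverse condition (ra != 0) around the move. */
static void cmoveq(uint64_t *ir, int ra, int rb, int rc)
{
    if (rc == 31)            /* r31 is hardwired to zero; drop the write */
        return;
    if (ir[ra] == 0)
        ir[rc] = ir[rb];
}

/* BEQ ra, disp: compute the next pc.  gen_bcond() emits a brcond that
 * chooses which of the two constants is written into cpu_pc. */
static uint64_t beq_target(uint64_t next_pc, uint64_t ra_val, int32_t disp)
{
    return (ra_val == 0) ? next_pc + (int64_t)(disp << 2) : next_pc;
}

int main(void)
{
    uint64_t ir[32] = { 0 };
    ir[2] = 42;
    cmoveq(ir, 1, 2, 3);                  /* r1 == 0, so r3 becomes 42 */
    printf("r3 = %" PRIu64 "\n", ir[3]);
    printf("pc = 0x%" PRIx64 "\n", beq_target(0x1000, ir[1], 4));
    return 0;
}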
[-- Attachment #2: q-alpha6.diff --]
[-- Type: application/octet-stream, Size: 7789 bytes --]
Index: target-alpha/op.c
===================================================================
--- target-alpha/op.c (revision 5239)
+++ target-alpha/op.c (working copy)
@@ -434,72 +434,6 @@
RETURN();
}
-void OPPROTO op_cmpeqz (void)
-{
- if (T0 == 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpnez (void)
-{
- if (T0 != 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpltz (void)
-{
- if ((int64_t)T0 < 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmplez (void)
-{
- if ((int64_t)T0 <= 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpgtz (void)
-{
- if ((int64_t)T0 > 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpgez (void)
-{
- if ((int64_t)T0 >= 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmplbs (void)
-{
- T0 &= 1;
- RETURN();
-}
-
-void OPPROTO op_cmplbc (void)
-{
- T0 = (~T0) & 1;
- RETURN();
-}
-
#if 0 // Qemu does not know how to do this...
void OPPROTO op_bcond (void)
{
Index: target-alpha/translate.c
===================================================================
--- target-alpha/translate.c (revision 5239)
+++ target-alpha/translate.c (working copy)
@@ -328,16 +328,31 @@
}
static always_inline void gen_bcond (DisasContext *ctx,
- void (*gen_test_op)(void),
- int ra, int32_t disp16)
+ TCGCond cond,
+ int ra, int32_t disp16, int mask)
{
- tcg_gen_movi_i64(cpu_T[1], ctx->pc + (int64_t)(disp16 << 2));
- if (ra != 31)
- tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
- else
+ int l1, l2;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ if (ra != 31) {
+ if (mask) {
+ tcg_gen_andi_i64(cpu_T[0], cpu_ir[ra], 1);
+ tcg_gen_brcondi_i64(cond, cpu_T[0], 0, l1);
+ }
+ else
+ tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
+ }
+ else {
+ /* Very uncommon case - Do not bother to optimize. */
tcg_gen_movi_i64(cpu_T[0], 0);
- (*gen_test_op)();
- _gen_op_bcond(ctx);
+ tcg_gen_brcondi_i64(cond, cpu_T[0], 0, l1);
+ }
+ tcg_gen_movi_i64(cpu_pc, ctx->pc);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2));
+ gen_set_label(l2);
}
static always_inline void gen_fbcond (DisasContext *ctx,
@@ -371,22 +386,38 @@
}
static always_inline void gen_cmov (DisasContext *ctx,
- void (*gen_test_op)(void),
+ TCGCond inv_cond,
int ra, int rb, int rc,
- int islit, uint8_t lit)
+ int islit, uint8_t lit, int mask)
{
- if (ra != 31)
- tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
- else
+ int l1;
+
+ if (rc == 31)
+ return;
+
+ l1 = gen_new_label();
+
+ if (ra != 31) {
+ if (mask) {
+ tcg_gen_andi_i64(cpu_T[0], cpu_ir[ra], 1);
+ tcg_gen_brcondi_i64(inv_cond, cpu_T[0], 0, l1);
+ }
+ else
+ tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
+ }
+ else {
+ /* Very uncommon case - Do not bother to optimize. */
tcg_gen_movi_i64(cpu_T[0], 0);
+ tcg_gen_brcondi_i64(inv_cond, cpu_T[0], 0, l1);
+ }
+
if (islit)
- tcg_gen_movi_i64(cpu_T[1], lit);
+ tcg_gen_movi_i64(cpu_ir[rc], lit);
else if (rb != 31)
- tcg_gen_mov_i64(cpu_T[1], cpu_ir[rb]);
+ tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
else
- tcg_gen_movi_i64(cpu_T[1], 0);
- (*gen_test_op)();
- gen_op_cmov_ir(rc);
+ tcg_gen_movi_i64(cpu_ir[rc], 0);
+ gen_set_label(l1);
}
static always_inline void gen_farith2 (DisasContext *ctx,
@@ -933,11 +964,11 @@
break;
case 0x14:
/* CMOVLBS */
- gen_cmov(ctx, &gen_op_cmplbs, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
break;
case 0x16:
/* CMOVLBC */
- gen_cmov(ctx, &gen_op_cmplbc, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_NE, ra, rb, rc, islit, lit, 1);
break;
case 0x20:
/* BIS */
@@ -961,11 +992,11 @@
break;
case 0x24:
/* CMOVEQ */
- gen_cmov(ctx, &gen_op_cmpeqz, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_NE, ra, rb, rc, islit, lit, 0);
break;
case 0x26:
/* CMOVNE */
- gen_cmov(ctx, &gen_op_cmpnez, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
break;
case 0x28:
/* ORNOT */
@@ -1011,11 +1042,11 @@
break;
case 0x44:
/* CMOVLT */
- gen_cmov(ctx, &gen_op_cmpltz, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_GE, ra, rb, rc, islit, lit, 0);
break;
case 0x46:
/* CMOVGE */
- gen_cmov(ctx, &gen_op_cmpgez, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_LT, ra, rb, rc, islit, lit, 0);
break;
case 0x48:
/* EQV */
@@ -1053,11 +1084,11 @@
break;
case 0x64:
/* CMOVLE */
- gen_cmov(ctx, &gen_op_cmplez, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_GT, ra, rb, rc, islit, lit, 0);
break;
case 0x66:
/* CMOVGT */
- gen_cmov(ctx, &gen_op_cmpgtz, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_LE, ra, rb, rc, islit, lit, 0);
break;
case 0x6C:
/* IMPLVER */
@@ -2173,42 +2204,42 @@
break;
case 0x38:
/* BLBC */
- gen_bcond(ctx, &gen_op_cmplbc, ra, disp16);
+ gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 1);
ret = 1;
break;
case 0x39:
/* BEQ */
- gen_bcond(ctx, &gen_op_cmpeqz, ra, disp16);
+ gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 0);
ret = 1;
break;
case 0x3A:
/* BLT */
- gen_bcond(ctx, &gen_op_cmpltz, ra, disp16);
+ gen_bcond(ctx, TCG_COND_LT, ra, disp16, 0);
ret = 1;
break;
case 0x3B:
/* BLE */
- gen_bcond(ctx, &gen_op_cmplez, ra, disp16);
+ gen_bcond(ctx, TCG_COND_LE, ra, disp16, 0);
ret = 1;
break;
case 0x3C:
/* BLBS */
- gen_bcond(ctx, &gen_op_cmplbs, ra, disp16);
+ gen_bcond(ctx, TCG_COND_NE, ra, disp16, 1);
ret = 1;
break;
case 0x3D:
/* BNE */
- gen_bcond(ctx, &gen_op_cmpnez, ra, disp16);
+ gen_bcond(ctx, TCG_COND_NE, ra, disp16, 0);
ret = 1;
break;
case 0x3E:
/* BGE */
- gen_bcond(ctx, &gen_op_cmpgez, ra, disp16);
+ gen_bcond(ctx, TCG_COND_GE, ra, disp16, 0);
ret = 1;
break;
case 0x3F:
/* BGT */
- gen_bcond(ctx, &gen_op_cmpgtz, ra, disp16);
+ gen_bcond(ctx, TCG_COND_GT, ra, disp16, 0);
ret = 1;
break;
invalid_opc:
@@ -2332,7 +2363,7 @@
}
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
- target_disas(logfile, pc_start, ctx.pc - pc_start, 1);
+ target_disas(logfile, pc_start, ctx.pc - pc_start, 1);
fprintf(logfile, "\n");
}
#endif
* [Qemu-devel] [PATCH v2] alpha: convert cmov and bcond to TCG
2008-09-17 13:01 [Qemu-devel] [PATCH] alpha: convert cmov and bcond to TCG Tristan Gingold
@ 2008-09-17 15:01 ` Tristan Gingold
2008-09-17 22:06 ` [Qemu-devel] [PATCH] " Aurelien Jarno
1 sibling, 0 replies; 3+ messages in thread
From: Tristan Gingold @ 2008-09-17 15:01 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 315 bytes --]
Hi,
Same as before, but with cleanup: unused functions removed.
Tristan.
On Sep 17, 2008, at 3:01 PM, Tristan Gingold wrote:
> Hi,
>
> this patch converts cmov and integer conditional branches to TCG.
>
> This was tested with the testsuite.
>
> Tristan.
>
> Signed-off-by: Tristan Gingold <gingold@adacore.com>
[-- Attachment #2: q-alpha9.diff --]
[-- Type: application/octet-stream, Size: 8601 bytes --]
Index: target-alpha/op.c
===================================================================
--- target-alpha/op.c (revision 5239)
+++ target-alpha/op.c (working copy)
@@ -434,72 +434,6 @@
RETURN();
}
-void OPPROTO op_cmpeqz (void)
-{
- if (T0 == 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpnez (void)
-{
- if (T0 != 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpltz (void)
-{
- if ((int64_t)T0 < 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmplez (void)
-{
- if ((int64_t)T0 <= 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpgtz (void)
-{
- if ((int64_t)T0 > 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmpgez (void)
-{
- if ((int64_t)T0 >= 0)
- T0 = 1;
- else
- T0 = 0;
- RETURN();
-}
-
-void OPPROTO op_cmplbs (void)
-{
- T0 &= 1;
- RETURN();
-}
-
-void OPPROTO op_cmplbc (void)
-{
- T0 = (~T0) & 1;
- RETURN();
-}
-
#if 0 // Qemu does not know how to do this...
void OPPROTO op_bcond (void)
{
Index: target-alpha/op_template.h
===================================================================
--- target-alpha/op_template.h (revision 5239)
+++ target-alpha/op_template.h (working copy)
@@ -37,15 +37,7 @@
#endif /* REG < 3 */
-/* Fixed-point register moves */
#if REG < 31
-void OPPROTO glue(op_cmov_ir, REG) (void)
-{
- if (T0)
- env->ir[REG] = T1;
- RETURN();
-}
-
/* floating point registers moves */
void OPPROTO glue(op_load_FT0_fir, REG) (void)
{
Index: target-alpha/translate.c
===================================================================
--- target-alpha/translate.c (revision 5239)
+++ target-alpha/translate.c (working copy)
@@ -124,11 +124,6 @@
NAME ## _table[n](); \
}
-/* IR moves */
-/* Special hacks for ir31 */
-#define gen_op_cmov_ir31 gen_op_nop
-GEN32(gen_op_cmov_ir, gen_op_cmov_ir);
-
/* FIR moves */
/* Special hacks for fir31 */
#define gen_op_load_FT0_fir31 gen_op_reset_FT0
@@ -328,16 +323,31 @@
}
static always_inline void gen_bcond (DisasContext *ctx,
- void (*gen_test_op)(void),
- int ra, int32_t disp16)
+ TCGCond cond,
+ int ra, int32_t disp16, int mask)
{
- tcg_gen_movi_i64(cpu_T[1], ctx->pc + (int64_t)(disp16 << 2));
- if (ra != 31)
- tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
- else
+ int l1, l2;
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ if (ra != 31) {
+ if (mask) {
+ tcg_gen_andi_i64(cpu_T[0], cpu_ir[ra], 1);
+ tcg_gen_brcondi_i64(cond, cpu_T[0], 0, l1);
+ }
+ else
+ tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
+ }
+ else {
+ /* Very uncommon case - Do not bother to optimize. */
tcg_gen_movi_i64(cpu_T[0], 0);
- (*gen_test_op)();
- _gen_op_bcond(ctx);
+ tcg_gen_brcondi_i64(cond, cpu_T[0], 0, l1);
+ }
+ tcg_gen_movi_i64(cpu_pc, ctx->pc);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2));
+ gen_set_label(l2);
}
static always_inline void gen_fbcond (DisasContext *ctx,
@@ -371,22 +381,38 @@
}
static always_inline void gen_cmov (DisasContext *ctx,
- void (*gen_test_op)(void),
+ TCGCond inv_cond,
int ra, int rb, int rc,
- int islit, uint8_t lit)
+ int islit, uint8_t lit, int mask)
{
- if (ra != 31)
- tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
- else
+ int l1;
+
+ if (rc == 31)
+ return;
+
+ l1 = gen_new_label();
+
+ if (ra != 31) {
+ if (mask) {
+ tcg_gen_andi_i64(cpu_T[0], cpu_ir[ra], 1);
+ tcg_gen_brcondi_i64(inv_cond, cpu_T[0], 0, l1);
+ }
+ else
+ tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
+ }
+ else {
+ /* Very uncommon case - Do not bother to optimize. */
tcg_gen_movi_i64(cpu_T[0], 0);
+ tcg_gen_brcondi_i64(inv_cond, cpu_T[0], 0, l1);
+ }
+
if (islit)
- tcg_gen_movi_i64(cpu_T[1], lit);
+ tcg_gen_movi_i64(cpu_ir[rc], lit);
else if (rb != 31)
- tcg_gen_mov_i64(cpu_T[1], cpu_ir[rb]);
+ tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
else
- tcg_gen_movi_i64(cpu_T[1], 0);
- (*gen_test_op)();
- gen_op_cmov_ir(rc);
+ tcg_gen_movi_i64(cpu_ir[rc], 0);
+ gen_set_label(l1);
}
static always_inline void gen_farith2 (DisasContext *ctx,
@@ -933,11 +959,11 @@
break;
case 0x14:
/* CMOVLBS */
- gen_cmov(ctx, &gen_op_cmplbs, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
break;
case 0x16:
/* CMOVLBC */
- gen_cmov(ctx, &gen_op_cmplbc, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_NE, ra, rb, rc, islit, lit, 1);
break;
case 0x20:
/* BIS */
@@ -961,11 +987,11 @@
break;
case 0x24:
/* CMOVEQ */
- gen_cmov(ctx, &gen_op_cmpeqz, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_NE, ra, rb, rc, islit, lit, 0);
break;
case 0x26:
/* CMOVNE */
- gen_cmov(ctx, &gen_op_cmpnez, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
break;
case 0x28:
/* ORNOT */
@@ -1011,11 +1037,11 @@
break;
case 0x44:
/* CMOVLT */
- gen_cmov(ctx, &gen_op_cmpltz, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_GE, ra, rb, rc, islit, lit, 0);
break;
case 0x46:
/* CMOVGE */
- gen_cmov(ctx, &gen_op_cmpgez, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_LT, ra, rb, rc, islit, lit, 0);
break;
case 0x48:
/* EQV */
@@ -1053,11 +1079,11 @@
break;
case 0x64:
/* CMOVLE */
- gen_cmov(ctx, &gen_op_cmplez, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_GT, ra, rb, rc, islit, lit, 0);
break;
case 0x66:
/* CMOVGT */
- gen_cmov(ctx, &gen_op_cmpgtz, ra, rb, rc, islit, lit);
+ gen_cmov(ctx, TCG_COND_LE, ra, rb, rc, islit, lit, 0);
break;
case 0x6C:
/* IMPLVER */
@@ -2173,42 +2199,42 @@
break;
case 0x38:
/* BLBC */
- gen_bcond(ctx, &gen_op_cmplbc, ra, disp16);
+ gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 1);
ret = 1;
break;
case 0x39:
/* BEQ */
- gen_bcond(ctx, &gen_op_cmpeqz, ra, disp16);
+ gen_bcond(ctx, TCG_COND_EQ, ra, disp16, 0);
ret = 1;
break;
case 0x3A:
/* BLT */
- gen_bcond(ctx, &gen_op_cmpltz, ra, disp16);
+ gen_bcond(ctx, TCG_COND_LT, ra, disp16, 0);
ret = 1;
break;
case 0x3B:
/* BLE */
- gen_bcond(ctx, &gen_op_cmplez, ra, disp16);
+ gen_bcond(ctx, TCG_COND_LE, ra, disp16, 0);
ret = 1;
break;
case 0x3C:
/* BLBS */
- gen_bcond(ctx, &gen_op_cmplbs, ra, disp16);
+ gen_bcond(ctx, TCG_COND_NE, ra, disp16, 1);
ret = 1;
break;
case 0x3D:
/* BNE */
- gen_bcond(ctx, &gen_op_cmpnez, ra, disp16);
+ gen_bcond(ctx, TCG_COND_NE, ra, disp16, 0);
ret = 1;
break;
case 0x3E:
/* BGE */
- gen_bcond(ctx, &gen_op_cmpgez, ra, disp16);
+ gen_bcond(ctx, TCG_COND_GE, ra, disp16, 0);
ret = 1;
break;
case 0x3F:
/* BGT */
- gen_bcond(ctx, &gen_op_cmpgtz, ra, disp16);
+ gen_bcond(ctx, TCG_COND_GT, ra, disp16, 0);
ret = 1;
break;
invalid_opc:
@@ -2332,7 +2358,7 @@
}
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
- target_disas(logfile, pc_start, ctx.pc - pc_start, 1);
+ target_disas(logfile, pc_start, ctx.pc - pc_start, 1);
fprintf(logfile, "\n");
}
#endif
* Re: [Qemu-devel] [PATCH] alpha: convert cmov and bcond to TCG
2008-09-17 13:01 [Qemu-devel] [PATCH] alpha: convert cmov and bcond to TCG Tristan Gingold
2008-09-17 15:01 ` [Qemu-devel] [PATCH v2] " Tristan Gingold
@ 2008-09-17 22:06 ` Aurelien Jarno
1 sibling, 0 replies; 3+ messages in thread
From: Aurelien Jarno @ 2008-09-17 22:06 UTC (permalink / raw)
To: qemu-devel
On Wed, Sep 17, 2008 at 03:01:12PM +0200, Tristan Gingold wrote:
> Hi,
>
> this patch converts cmov and integer conditional branches to TCG.
>
> This was tested with the testsuite.
Applied with minor changes to avoid the use of cpu_T[]. Thanks.
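The cpu_T[] use in the patch is confined to the low-bit (mask) tests and the ra == 31 case. A hedged sketch of how the mask path in gen_bcond() could be written with a local temporary instead, assuming a tcg_temp_new()/tcg_temp_free() style interface of that QEMU revision; this is an illustration, not necessarily the code that was committed:

    if (mask) {
        /* Isolate the low bit in a local temporary rather than cpu_T[0]. */
        TCGv tmp = tcg_temp_new(TCG_TYPE_I64);   /* assumed API of this era */
        tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    } else {
        tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    }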
> Tristan.
>
> Signed-off-by: Tristan Gingold <gingold@adacore.com>
>
> Tristan.
>
--
.''`. Aurelien Jarno | GPG: 1024D/F1BCDB73
: :' : Debian developer | Electrical Engineer
`. `' aurel32@debian.org | aurelien@aurel32.net
`- people.debian.org/~aurel32 | www.aurel32.net