From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Subject: [PATCH 14/18] target/i386/tcg: kill tmp1_i64
Date: Wed, 10 Dec 2025 14:16:49 +0100
Message-ID: <20251210131653.852163-15-pbonzini@redhat.com>
In-Reply-To: <20251210131653.852163-1-pbonzini@redhat.com>
The tmp1_i64 field of DisasContext is only ever used as a short-lived
scratch value inside a single emitter.  Replace it with TCGv_i64
temporaries allocated right where they are needed, and drop the field
together with its allocation in i386_tr_init_disas_context().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
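Note for reviewers, not part of the commit message: every hunk applies the
same mechanical substitution.  As a minimal sketch, taken from the
gen_stq_env_A0 hunk below and shown outside of diff form for readability:

    /* before: reuse the scratch i64 kept in DisasContext */
    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);

    /* after: allocate a TCGv_i64 locally at the point of use */
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_ld_i64(t, tcg_env, offset);
    tcg_gen_qemu_st_i64(t, s->A0, s->mem_index, MO_LEUQ);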
target/i386/tcg/translate.c | 66 ++++++++++++++++++++--------------
target/i386/tcg/emit.c.inc | 72 ++++++++++++++++++++++---------------
2 files changed, 84 insertions(+), 54 deletions(-)
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 8cd70456a51..108276f4008 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -136,7 +136,6 @@ typedef struct DisasContext {
/* TCG local register indexes (only used inside old micro ops) */
TCGv_i32 tmp2_i32;
- TCGv_i64 tmp1_i64;
sigjmp_buf jmpbuf;
TCGOp *prev_insn_start;
@@ -2365,14 +2364,18 @@ static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(t, s->A0, s->mem_index, MO_LEUQ);
+ tcg_gen_st_i64(t, tcg_env, offset);
}
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t, tcg_env, offset);
+ tcg_gen_qemu_st_i64(t, s->A0, s->mem_index, MO_LEUQ);
}
static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
@@ -2452,6 +2455,7 @@ static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
TCGv last_addr = tcg_temp_new();
bool update_fdp = true;
+ TCGv_i64 t64;
tcg_gen_mov_tl(last_addr, ea);
gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
@@ -2472,9 +2476,10 @@ static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
break;
case 0x20 ... 0x27: /* fxxxl */
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
+ t64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t64, s->A0,
s->mem_index, MO_LEUQ);
- gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
+ gen_helper_fldl_FT0(tcg_env, t64);
gen_helper_fp_arith_ST0_FT0(op & 7);
break;
@@ -2496,9 +2501,10 @@ static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
break;
case 0x28: /* fldl */
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
+ t64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t64, s->A0,
s->mem_index, MO_LEUQ);
- gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
+ gen_helper_fldl_ST0(tcg_env, t64);
break;
case 0x38: /* filds */
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
@@ -2513,8 +2519,9 @@ static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
gen_helper_fpop(tcg_env);
break;
case 0x29: /* fisttpll */
- gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
+ t64 = tcg_temp_new_i64();
+ gen_helper_fisttll_ST0(t64, tcg_env);
+ tcg_gen_qemu_st_i64(t64, s->A0,
s->mem_index, MO_LEUQ);
gen_helper_fpop(tcg_env);
break;
@@ -2542,8 +2549,9 @@ static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
}
break;
case 0x2a: case 0x2b: /* fstl, fstpl */
- gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
+ t64 = tcg_temp_new_i64();
+ gen_helper_fstl_ST0(t64, tcg_env);
+ tcg_gen_qemu_st_i64(t64, s->A0,
s->mem_index, MO_LEUQ);
if ((op & 7) == 3) {
gen_helper_fpop(tcg_env);
@@ -2611,13 +2619,15 @@ static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
gen_helper_fpop(tcg_env);
break;
case 0x3d: /* fildll */
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
+ t64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t64, s->A0,
s->mem_index, MO_LEUQ);
- gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
+ gen_helper_fildll_ST0(tcg_env, t64);
break;
case 0x3f: /* fistpll */
- gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
+ t64 = tcg_temp_new_i64();
+ gen_helper_fistll_ST0(t64, tcg_env);
+ tcg_gen_qemu_st_i64(t64, s->A0,
s->mem_index, MO_LEUQ);
gen_helper_fpop(tcg_env);
break;
@@ -2951,6 +2961,7 @@ static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
int modrm = s->modrm;
MemOp ot;
int reg, rm, mod, op;
+ TCGv_i64 t64;
/* now check op code */
switch (b) {
@@ -3142,9 +3153,10 @@ static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
|| (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
}
+ t64 = tcg_temp_new_i64();
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
- tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
+ gen_helper_xgetbv(t64, tcg_env, s->tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], t64);
break;
case 0xd1: /* xsetbv */
@@ -3156,10 +3168,11 @@ static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
if (!check_cpl0(s)) {
break;
}
- tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
+ t64 = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
+ gen_helper_xsetbv(tcg_env, s->tmp2_i32, t64);
/* End TB because translation flags may change. */
s->base.is_jmp = DISAS_EOB_NEXT;
break;
@@ -3319,18 +3332,20 @@ static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
goto illegal_op;
}
+ t64 = tcg_temp_new_i64();
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
- tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
+ gen_helper_rdpkru(t64, tcg_env, s->tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], t64);
break;
case 0xef: /* wrpkru */
if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
goto illegal_op;
}
- tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
+ t64 = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
+ gen_helper_wrpkru(tcg_env, s->tmp2_i32, t64);
break;
CASE_MODRM_OP(6): /* lmsw */
@@ -3722,7 +3737,6 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
dc->T1 = tcg_temp_new();
dc->A0 = tcg_temp_new();
- dc->tmp1_i64 = tcg_temp_new_i64();
dc->tmp2_i32 = tcg_temp_new_i32();
dc->cc_srcT = tcg_temp_new();
}
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 131aefce53c..8dac4d09da1 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -521,10 +521,12 @@ static void gen_3dnow(DisasContext *s, X86DecodedInsn *decode)
gen_helper_enter_mmx(tcg_env);
if (fn == FN_3DNOW_MOVE) {
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t, tcg_env, decode->op[1].offset);
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset);
} else {
- fn(tcg_env, OP_PTR0, OP_PTR1);
+ fn(tcg_env, OP_PTR0, OP_PTR1);
}
}
@@ -2596,10 +2598,11 @@ static void gen_MOVQ(DisasContext *s, X86DecodedInsn *decode)
{
int vec_len = vector_len(s, decode);
int lo_ofs = vector_elem_offset(&decode->op[0], MO_64, 0);
+ TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
+ tcg_gen_ld_i64(t, tcg_env, decode->op[2].offset);
if (decode->op[0].has_ea) {
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
+ tcg_gen_qemu_st_i64(t, s->A0, s->mem_index, MO_LEUQ);
} else {
/*
* tcg_gen_gvec_dup_i64(MO_64, op0.offset, 8, vec_len, s->tmp1_64) would
@@ -2610,7 +2613,7 @@ static void gen_MOVQ(DisasContext *s, X86DecodedInsn *decode)
* it disqualifies using oprsz < maxsz to emulate VEX128.
*/
tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, lo_ofs);
+ tcg_gen_st_i64(t, tcg_env, lo_ofs);
}
}
@@ -4505,10 +4508,12 @@ static void gen_VMASKMOVPS_st(DisasContext *s, X86DecodedInsn *decode)
static void gen_VMOVHPx_ld(DisasContext *s, X86DecodedInsn *decode)
{
+ TCGv_i64 t = tcg_temp_new_i64();
+
gen_ldq_env_A0(s, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(t, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
}
@@ -4519,33 +4524,39 @@ static void gen_VMOVHPx_st(DisasContext *s, X86DecodedInsn *decode)
static void gen_VMOVHPx(DisasContext *s, X86DecodedInsn *decode)
{
+ TCGv_i64 t = tcg_temp_new_i64();
+
if (decode->op[0].offset != decode->op[2].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(t, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
}
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(t, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
}
static void gen_VMOVHLPS(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(t, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
}
}
static void gen_VMOVLHPS(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t, tcg_env, decode->op[2].offset);
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(t, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
}
@@ -4557,34 +4568,39 @@ static void gen_VMOVLHPS(DisasContext *s, X86DecodedInsn *decode)
static void gen_VMOVLPx(DisasContext *s, X86DecodedInsn *decode)
{
int vec_len = vector_len(s, decode);
+ TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(t, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(0)));
tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(t, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
static void gen_VMOVLPx_ld(DisasContext *s, X86DecodedInsn *decode)
{
int vec_len = vector_len(s, decode);
+ TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(t, s->A0, s->mem_index, MO_LEUQ);
tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
- tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_st_i64(t, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
}
static void gen_VMOVLPx_st(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_ld_i64(s->tmp1_i64, OP_PTR2, offsetof(ZMMReg, ZMM_Q(0)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t, OP_PTR2, offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_qemu_st_i64(t, s->A0, s->mem_index, MO_LEUQ);
}
static void gen_VMOVSD_ld(DisasContext *s, X86DecodedInsn *decode)
{
TCGv_i64 zero = tcg_constant_i64(0);
+ TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(t, s->A0, s->mem_index, MO_LEUQ);
tcg_gen_st_i64(zero, OP_PTR0, offsetof(ZMMReg, ZMM_Q(1)));
- tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_st_i64(t, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
}
static void gen_VMOVSS(DisasContext *s, X86DecodedInsn *decode)
--
2.52.0