From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: Richard Henderson <richard.henderson@linaro.org>
Subject: [PULL 35/53] target/i386: reimplement 0x0f 0x60-0x6f, add AVX
Date: Tue, 18 Oct 2022 15:30:24 +0200
Message-ID: <20221018133042.856368-36-pbonzini@redhat.com>
In-Reply-To: <20221018133042.856368-1-pbonzini@redhat.com>
These are both MMX and SSE/AVX instructions, except for vmovdqu. In both
cases the inputs and the output are in s->ptr{0,1,2}, so the only difference
between MMX, SSE, and AVX is which helper to call.
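Concretely, the prefix just selects one of three helpers with the same
signature. A simplified sketch of the gen_binary_int_sse dispatch added
below (the real function also rejects VEX-encoded MMX forms):

    if (!(s->prefix & PREFIX_DATA)) {
        mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);   /* no prefix: MMX */
    } else if (!s->vex_l) {
        xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);   /* 66 prefix, VEX.L=0: 128-bit */
    } else {
        ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);   /* 66 prefix, VEX.L=1: 256-bit */
    }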
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/tcg/decode-new.c.inc | 42 +++++++
target/i386/tcg/emit.c.inc | 202 +++++++++++++++++++++++++++++++
target/i386/tcg/translate.c | 19 ++-
3 files changed, 262 insertions(+), 1 deletion(-)
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index dc04aa9cbb..8a749f33fd 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -135,6 +135,19 @@ static uint8_t get_modrm(DisasContext *s, CPUX86State *env)
return s->modrm;
}
+static inline const X86OpEntry *decode_by_prefix(DisasContext *s, const X86OpEntry entries[4])
+{
+ if (s->prefix & PREFIX_REPNZ) {
+ return &entries[3];
+ } else if (s->prefix & PREFIX_REPZ) {
+ return &entries[2];
+ } else if (s->prefix & PREFIX_DATA) {
+ return &entries[1];
+ } else {
+ return &entries[0];
+ }
+}
+
static void decode_group17(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
static const X86GenFunc group17_gen[8] = {
@@ -144,6 +157,17 @@ static void decode_group17(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
entry->gen = group17_gen[op];
}
+static void decode_0F6F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
+{
+ static const X86OpEntry opcodes_0F6F[4] = {
+ X86_OP_ENTRY3(MOVDQ, P,q, None,None, Q,q, vex1 mmx), /* movq */
+ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex1), /* movdqa */
+ X86_OP_ENTRY3(MOVDQ, V,x, None,None, W,x, vex4_unal), /* movdqu */
+ {},
+ };
+ *entry = *decode_by_prefix(s, opcodes_0F6F);
+}
+
static const X86OpEntry opcodes_0F38_00toEF[240] = {
};
@@ -229,8 +253,26 @@ static void decode_0F3A(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
}
static const X86OpEntry opcodes_0F[256] = {
+ [0x60] = X86_OP_ENTRY3(PUNPCKLBW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x61] = X86_OP_ENTRY3(PUNPCKLWD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x62] = X86_OP_ENTRY3(PUNPCKLDQ, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x63] = X86_OP_ENTRY3(PACKSSWB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x64] = X86_OP_ENTRY3(PCMPGTB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x65] = X86_OP_ENTRY3(PCMPGTW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x66] = X86_OP_ENTRY3(PCMPGTD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x67] = X86_OP_ENTRY3(PACKUSWB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+
[0x38] = X86_OP_GROUP0(0F38),
[0x3a] = X86_OP_GROUP0(0F3A),
+
+ [0x68] = X86_OP_ENTRY3(PUNPCKHBW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x69] = X86_OP_ENTRY3(PUNPCKHWD, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x6a] = X86_OP_ENTRY3(PUNPCKHDQ, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x6b] = X86_OP_ENTRY3(PACKSSDW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
+ [0x6c] = X86_OP_ENTRY3(PUNPCKLQDQ, V,x, H,x, W,x, vex4 p_66 avx2_256),
+ [0x6d] = X86_OP_ENTRY3(PUNPCKHQDQ, V,x, H,x, W,x, vex4 p_66 avx2_256),
+ [0x6e] = X86_OP_ENTRY3(MOVD_to, V,x, None,None, E,y, vex5 mmx p_00_66), /* wrong dest Vy on SDM! */
+ [0x6f] = X86_OP_GROUP0(0F6F),
};
static void do_decode_0F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 947deacd37..8dbacc21ed 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -71,6 +71,56 @@ static inline int xmm_offset(MemOp ot)
}
}
+static int vector_reg_offset(X86DecodedOp *op)
+{
+ assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);
+
+ if (op->unit == X86_OP_MMX) {
+ return op->offset - mmx_offset(op->ot);
+ } else {
+ return op->offset - xmm_offset(op->ot);
+ }
+}
+
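+/* Offset of element N of size OT within the operand's MMX or ZMM register. */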
+static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n)
+{
+ int base_ofs = vector_reg_offset(op);
+ switch(ot) {
+ case MO_8:
+ if (op->unit == X86_OP_MMX) {
+ return base_ofs + offsetof(MMXReg, MMX_B(n));
+ } else {
+ return base_ofs + offsetof(ZMMReg, ZMM_B(n));
+ }
+ case MO_16:
+ if (op->unit == X86_OP_MMX) {
+ return base_ofs + offsetof(MMXReg, MMX_W(n));
+ } else {
+ return base_ofs + offsetof(ZMMReg, ZMM_W(n));
+ }
+ case MO_32:
+ if (op->unit == X86_OP_MMX) {
+ return base_ofs + offsetof(MMXReg, MMX_L(n));
+ } else {
+ return base_ofs + offsetof(ZMMReg, ZMM_L(n));
+ }
+ case MO_64:
+ if (op->unit == X86_OP_MMX) {
+ return base_ofs;
+ } else {
+ return base_ofs + offsetof(ZMMReg, ZMM_Q(n));
+ }
+ case MO_128:
+ assert(op->unit == X86_OP_SSE);
+ return base_ofs + offsetof(ZMMReg, ZMM_X(n));
+ case MO_256:
+ assert(op->unit == X86_OP_SSE);
+ return base_ofs + offsetof(ZMMReg, ZMM_Y(n));
+ default:
+ g_assert_not_reached();
+ }
+}
+
static void compute_mmx_offset(X86DecodedOp *op)
{
if (!op->has_ea) {
@@ -183,6 +233,23 @@ static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
}
}
+static TCGv_ptr op_ptr(X86DecodedInsn *decode, int opn)
+{
+ X86DecodedOp *op = &decode->op[opn];
+ if (op->v_ptr) {
+ return op->v_ptr;
+ }
+ op->v_ptr = tcg_temp_new_ptr();
+
+ /* The temporary points to the MMXReg or ZMMReg. */
+ tcg_gen_addi_ptr(op->v_ptr, cpu_env, vector_reg_offset(op));
+ return op->v_ptr;
+}
+
+#define OP_PTR0 op_ptr(decode, 0)
+#define OP_PTR1 op_ptr(decode, 1)
+#define OP_PTR2 op_ptr(decode, 2)
+
static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
{
X86DecodedOp *op = &decode->op[opn];
@@ -216,6 +283,114 @@ static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv
}
}
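+/* Operand length in bytes: 8 for MMX, else 16 or 32 depending on VEX.L. */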
+static inline int vector_len(DisasContext *s, X86DecodedInsn *decode)
+{
+ if (decode->e.special == X86_SPECIAL_MMX &&
+ !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
+ return 8;
+ }
+ return s->vex_l ? 32 : 16;
+}
+
+static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
+{
+ MemOp ot = decode->op[0].ot;
+ int vec_len = vector_len(s, decode);
+ bool aligned = sse_needs_alignment(s, decode, ot);
+
+ if (!decode->op[0].has_ea) {
+ tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, vec_len, vec_len);
+ return;
+ }
+
+ switch (ot) {
+ case MO_64:
+ gen_stq_env_A0(s, src_ofs);
+ break;
+ case MO_128:
+ gen_sto_env_A0(s, src_ofs, aligned);
+ break;
+ case MO_256:
+ gen_sty_env_A0(s, src_ofs, aligned);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#define BINARY_INT_GVEC(uname, func, ...) \
+static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
+{ \
+ int vec_len = vector_len(s, decode); \
+ \
+ func(__VA_ARGS__, \
+ decode->op[0].offset, decode->op[1].offset, \
+ decode->op[2].offset, vec_len, vec_len); \
+}
+
+BINARY_INT_GVEC(PCMPGTB, tcg_gen_gvec_cmp, TCG_COND_GT, MO_8)
+BINARY_INT_GVEC(PCMPGTW, tcg_gen_gvec_cmp, TCG_COND_GT, MO_16)
+BINARY_INT_GVEC(PCMPGTD, tcg_gen_gvec_cmp, TCG_COND_GT, MO_32)
+
+
+/*
+ * 00 = p* Pq, Qq (if mmx not NULL; no VEX)
+ * 66 = vp* Vx, Hx, Wx
+ *
+ * These are really the same encoding, because 1) V is the same as P when VEX.V
+ * is not present 2) P and Q are the same as H and W apart from MM/XMM
+ */
+static inline void gen_binary_int_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
+ SSEFunc_0_eppp mmx, SSEFunc_0_eppp xmm, SSEFunc_0_eppp ymm)
+{
+ assert(!!mmx == !!(decode->e.special == X86_SPECIAL_MMX));
+
+ if (mmx && (s->prefix & PREFIX_VEX) && !(s->prefix & PREFIX_DATA)) {
+ /* VEX encoding is not applicable to MMX instructions. */
+ gen_illegal_opcode(s);
+ return;
+ }
+ if (!(s->prefix & PREFIX_DATA)) {
+ mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ } else if (!s->vex_l) {
+ xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ } else {
+ ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ }
+}
+
+
+#define BINARY_INT_MMX(uname, lname) \
+static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
+{ \
+ gen_binary_int_sse(s, env, decode, \
+ gen_helper_##lname##_mmx, \
+ gen_helper_##lname##_xmm, \
+ gen_helper_##lname##_ymm); \
+}
+BINARY_INT_MMX(PUNPCKLBW, punpcklbw)
+BINARY_INT_MMX(PUNPCKLWD, punpcklwd)
+BINARY_INT_MMX(PUNPCKLDQ, punpckldq)
+BINARY_INT_MMX(PACKSSWB, packsswb)
+BINARY_INT_MMX(PACKUSWB, packuswb)
+BINARY_INT_MMX(PUNPCKHBW, punpckhbw)
+BINARY_INT_MMX(PUNPCKHWD, punpckhwd)
+BINARY_INT_MMX(PUNPCKHDQ, punpckhdq)
+BINARY_INT_MMX(PACKSSDW, packssdw)
+
+/* Instructions with no MMX equivalent. */
+#define BINARY_INT_SSE(uname, lname) \
+static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
+{ \
+ gen_binary_int_sse(s, env, decode, \
+ NULL, \
+ gen_helper_##lname##_xmm, \
+ gen_helper_##lname##_ymm); \
+}
+
+BINARY_INT_SSE(PUNPCKLQDQ, punpcklqdq)
+BINARY_INT_SSE(PUNPCKHQDQ, punpckhqdq)
+
static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op)
{
TCGv carry_in = NULL;
@@ -383,6 +558,33 @@ static void gen_MOVBE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
}
}
+static void gen_MOVD_to(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ MemOp ot = decode->op[2].ot;
+ int vec_len = vector_len(s, decode);
+ int lo_ofs = vector_elem_offset(&decode->op[0], ot, 0);
+
+ tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
+
+ switch (ot) {
+ case MO_32:
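+        /* Without TARGET_X86_64, tcg_gen_st_tl below is a 32-bit store. */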
+#ifdef TARGET_X86_64
+ tcg_gen_st32_tl(s->T1, cpu_env, lo_ofs);
+ break;
+ case MO_64:
+#endif
+ tcg_gen_st_tl(s->T1, cpu_env, lo_ofs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void gen_MOVDQ(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_store_sse(s, decode, decode->op[2].offset);
+}
+
static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
MemOp ot = decode->op[0].ot;
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index e2c01af02d..5133b8c23d 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2935,6 +2935,23 @@ static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
}
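+/* Store the 256-bit vector at env offset OFFSET to guest memory at A0. */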
+static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
+{
+ int mem_index = s->mem_index;
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
+ MO_LEUQ | (align ? MO_ALIGN_32 : 0));
+ tcg_gen_addi_tl(s->tmp0, s->A0, 8);
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
+ tcg_gen_addi_tl(s->tmp0, s->A0, 16);
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
+ tcg_gen_addi_tl(s->tmp0, s->A0, 24);
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
+}
+
static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset)
{
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
@@ -4764,7 +4781,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
#ifndef CONFIG_USER_ONLY
use_new &= b <= limit;
#endif
- if (use_new && 0) {
+ if (use_new && (b >= 0x160 && b <= 0x16f)) {
disas_insn_new(s, cpu, b + 0x100);
return s->pc;
}
--
2.37.3