From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org, Peter Maydell <peter.maydell@linaro.org>
Subject: [PATCH v2 24/67] target/arm: Convert FMLA, FMLS to decodetree
Date: Fri, 24 May 2024 16:20:38 -0700
Message-ID: <20240524232121.284515-25-richard.henderson@linaro.org>
In-Reply-To: <20240524232121.284515-1-richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper.h | 2 +
target/arm/tcg/a64.decode | 22 +++
target/arm/tcg/translate-a64.c | 241 +++++++++++++++++----------------
target/arm/tcg/vec_helper.c | 14 ++
4 files changed, 163 insertions(+), 116 deletions(-)
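Note for reviewers, not for the commit message: the new double-precision helpers
mirror the existing half- and single-precision ones, i.e. FMLS is implemented as a
fused multiply-add with the first source operand negated before the operation, not
as a separate multiply and subtract. A minimal host-double sketch of the per-element
semantics, for illustration only (ad-hoc names; the real helpers go through
float64_muladd() with an FPST-provided float_status, so rounding and NaN handling
differ from host fma()):

    #include <math.h>

    /* Illustration only: per-element FMLA/FMLS on host doubles. */
    static double fmla_elt(double dest, double op1, double op2)
    {
        /* FMLA: dest = dest + op1 * op2, with a single rounding. */
        return fma(op1, op2, dest);
    }

    static double fmls_elt(double dest, double op1, double op2)
    {
        /* FMLS: negate op1 first, then the same fused operation,
         * matching float64_mulsub_f() in the diff below. */
        return fma(-op1, op2, dest);
    }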
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 0fd01c9c52..e021c18517 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -770,9 +770,11 @@ DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index cde4b86303..11527bb5e5 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -742,12 +742,26 @@ FMINNM_v 0.00 1110 1.1 ..... 11000 1 ..... ..... @qrrr_sd
FMULX_v 0.00 1110 010 ..... 00011 1 ..... ..... @qrrr_h
FMULX_v 0.00 1110 0.1 ..... 11011 1 ..... ..... @qrrr_sd
+FMLA_v 0.00 1110 010 ..... 00001 1 ..... ..... @qrrr_h
+FMLA_v 0.00 1110 0.1 ..... 11001 1 ..... ..... @qrrr_sd
+
+FMLS_v 0.00 1110 110 ..... 00001 1 ..... ..... @qrrr_h
+FMLS_v 0.00 1110 1.1 ..... 11001 1 ..... ..... @qrrr_sd
+
### Advanced SIMD scalar x indexed element
FMUL_si 0101 1111 00 .. .... 1001 . 0 ..... ..... @rrx_h
FMUL_si 0101 1111 10 . ..... 1001 . 0 ..... ..... @rrx_s
FMUL_si 0101 1111 11 0 ..... 1001 . 0 ..... ..... @rrx_d
+FMLA_si 0101 1111 00 .. .... 0001 . 0 ..... ..... @rrx_h
+FMLA_si 0101 1111 10 .. .... 0001 . 0 ..... ..... @rrx_s
+FMLA_si 0101 1111 11 0. .... 0001 . 0 ..... ..... @rrx_d
+
+FMLS_si 0101 1111 00 .. .... 0101 . 0 ..... ..... @rrx_h
+FMLS_si 0101 1111 10 .. .... 0101 . 0 ..... ..... @rrx_s
+FMLS_si 0101 1111 11 0. .... 0101 . 0 ..... ..... @rrx_d
+
FMULX_si 0111 1111 00 .. .... 1001 . 0 ..... ..... @rrx_h
FMULX_si 0111 1111 10 . ..... 1001 . 0 ..... ..... @rrx_s
FMULX_si 0111 1111 11 0 ..... 1001 . 0 ..... ..... @rrx_d
@@ -758,6 +772,14 @@ FMUL_vi 0.00 1111 00 .. .... 1001 . 0 ..... ..... @qrrx_h
FMUL_vi 0.00 1111 10 . ..... 1001 . 0 ..... ..... @qrrx_s
FMUL_vi 0.00 1111 11 0 ..... 1001 . 0 ..... ..... @qrrx_d
+FMLA_vi 0.00 1111 00 .. .... 0001 . 0 ..... ..... @qrrx_h
+FMLA_vi 0.00 1111 10 . ..... 0001 . 0 ..... ..... @qrrx_s
+FMLA_vi 0.00 1111 11 0 ..... 0001 . 0 ..... ..... @qrrx_d
+
+FMLS_vi 0.00 1111 00 .. .... 0101 . 0 ..... ..... @qrrx_h
+FMLS_vi 0.00 1111 10 . ..... 0101 . 0 ..... ..... @qrrx_s
+FMLS_vi 0.00 1111 11 0 ..... 0101 . 0 ..... ..... @qrrx_d
+
FMULX_vi 0.10 1111 00 .. .... 1001 . 0 ..... ..... @qrrx_h
FMULX_vi 0.10 1111 10 . ..... 1001 . 0 ..... ..... @qrrx_s
FMULX_vi 0.10 1111 11 0 ..... 1001 . 0 ..... ..... @qrrx_d
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 5ba30ba7c8..f84c12378d 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -5066,6 +5066,20 @@ static gen_helper_gvec_3_ptr * const f_vector_fmulx[3] = {
};
TRANS(FMULX_v, do_fp3_vector, a, f_vector_fmulx)
+static gen_helper_gvec_3_ptr * const f_vector_fmla[3] = {
+ gen_helper_gvec_vfma_h,
+ gen_helper_gvec_vfma_s,
+ gen_helper_gvec_vfma_d,
+};
+TRANS(FMLA_v, do_fp3_vector, a, f_vector_fmla)
+
+static gen_helper_gvec_3_ptr * const f_vector_fmls[3] = {
+ gen_helper_gvec_vfms_h,
+ gen_helper_gvec_vfms_s,
+ gen_helper_gvec_vfms_d,
+};
+TRANS(FMLS_v, do_fp3_vector, a, f_vector_fmls)
+
/*
* Advanced SIMD scalar/vector x indexed element
*/
@@ -5115,6 +5129,64 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f)
TRANS(FMUL_si, do_fp3_scalar_idx, a, &f_scalar_fmul)
TRANS(FMULX_si, do_fp3_scalar_idx, a, &f_scalar_fmulx)
+static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
+{
+ switch (a->esz) {
+ case MO_64:
+ if (fp_access_check(s)) {
+ TCGv_i64 t0 = read_fp_dreg(s, a->rd);
+ TCGv_i64 t1 = read_fp_dreg(s, a->rn);
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ read_vec_element(s, t2, a->rm, a->idx, MO_64);
+ if (neg) {
+ gen_vfp_negd(t1, t1);
+ }
+ gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
+ write_fp_dreg(s, a->rd, t0);
+ }
+ break;
+ case MO_32:
+ if (fp_access_check(s)) {
+ TCGv_i32 t0 = read_fp_sreg(s, a->rd);
+ TCGv_i32 t1 = read_fp_sreg(s, a->rn);
+ TCGv_i32 t2 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, t2, a->rm, a->idx, MO_32);
+ if (neg) {
+ gen_vfp_negs(t1, t1);
+ }
+ gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
+ write_fp_sreg(s, a->rd, t0);
+ }
+ break;
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i32 t0 = read_fp_hreg(s, a->rd);
+ TCGv_i32 t1 = read_fp_hreg(s, a->rn);
+ TCGv_i32 t2 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, t2, a->rm, a->idx, MO_16);
+ if (neg) {
+ gen_vfp_negh(t1, t1);
+ }
+ gen_helper_advsimd_muladdh(t0, t1, t2, t0,
+ fpstatus_ptr(FPST_FPCR_F16));
+ write_fp_sreg(s, a->rd, t0);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+TRANS(FMLA_si, do_fmla_scalar_idx, a, false)
+TRANS(FMLS_si, do_fmla_scalar_idx, a, true)
+
static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a,
gen_helper_gvec_3_ptr * const fns[3])
{
@@ -5157,6 +5229,42 @@ static gen_helper_gvec_3_ptr * const f_vector_idx_fmulx[3] = {
};
TRANS(FMULX_vi, do_fp3_vector_idx, a, f_vector_idx_fmulx)
+static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg)
+{
+ static gen_helper_gvec_4_ptr * const fns[3] = {
+ gen_helper_gvec_fmla_idx_h,
+ gen_helper_gvec_fmla_idx_s,
+ gen_helper_gvec_fmla_idx_d,
+ };
+ MemOp esz = a->esz;
+
+ switch (esz) {
+ case MO_64:
+ if (!a->q) {
+ return false;
+ }
+ break;
+ case MO_32:
+ break;
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (fp_access_check(s)) {
+ gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
+ esz == MO_16, (a->idx << 1) | neg,
+ fns[esz - 1]);
+ }
+ return true;
+}
+
+TRANS(FMLA_vi, do_fmla_vector_idx, a, false)
+TRANS(FMLS_vi, do_fmla_vector_idx, a, true)
+
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
@@ -9119,15 +9227,6 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
read_vec_element(s, tcg_op2, rm, pass, MO_64);
switch (fpopcode) {
- case 0x39: /* FMLS */
- /* As usual for ARM, separate negation for fused multiply-add */
- gen_vfp_negd(tcg_op1, tcg_op1);
- /* fall through */
- case 0x19: /* FMLA */
- read_vec_element(s, tcg_res, rd, pass, MO_64);
- gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
- tcg_res, fpst);
- break;
case 0x1c: /* FCMEQ */
gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
break;
@@ -9155,10 +9254,12 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
break;
default:
case 0x18: /* FMAXNM */
+ case 0x19: /* FMLA */
case 0x1a: /* FADD */
case 0x1b: /* FMULX */
case 0x1e: /* FMAX */
case 0x38: /* FMINNM */
+ case 0x39: /* FMLS */
case 0x3a: /* FSUB */
case 0x3e: /* FMIN */
case 0x5b: /* FMUL */
@@ -9177,15 +9278,6 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
switch (fpopcode) {
- case 0x39: /* FMLS */
- /* As usual for ARM, separate negation for fused multiply-add */
- gen_vfp_negs(tcg_op1, tcg_op1);
- /* fall through */
- case 0x19: /* FMLA */
- read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
- gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
- tcg_res, fpst);
- break;
case 0x1c: /* FCMEQ */
gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
break;
@@ -9213,10 +9305,12 @@ static void handle_3same_float(DisasContext *s, int size, int elements,
break;
default:
case 0x18: /* FMAXNM */
+ case 0x19: /* FMLA */
case 0x1a: /* FADD */
case 0x1b: /* FMULX */
case 0x1e: /* FMAX */
case 0x38: /* FMINNM */
+ case 0x39: /* FMLS */
case 0x3a: /* FSUB */
case 0x3e: /* FMIN */
case 0x5b: /* FMUL */
@@ -11140,8 +11234,6 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
case 0x3f: /* FRSQRTS */
case 0x5d: /* FACGE */
case 0x7d: /* FACGT */
- case 0x19: /* FMLA */
- case 0x39: /* FMLS */
case 0x1c: /* FCMEQ */
case 0x5c: /* FCMGE */
case 0x7a: /* FABD */
@@ -11174,10 +11266,12 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
default:
case 0x18: /* FMAXNM */
+ case 0x19: /* FMLA */
case 0x1a: /* FADD */
case 0x1b: /* FMULX */
case 0x1e: /* FMAX */
case 0x38: /* FMINNM */
+ case 0x39: /* FMLS */
case 0x3a: /* FSUB */
case 0x3e: /* FMIN */
case 0x5b: /* FMUL */
@@ -11523,10 +11617,8 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
int pass;
switch (fpopcode) {
- case 0x1: /* FMLA */
case 0x4: /* FCMEQ */
case 0x7: /* FRECPS */
- case 0x9: /* FMLS */
case 0xf: /* FRSQRTS */
case 0x14: /* FCMGE */
case 0x15: /* FACGE */
@@ -11544,10 +11636,12 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
break;
default:
case 0x0: /* FMAXNM */
+ case 0x1: /* FMLA */
case 0x2: /* FADD */
case 0x3: /* FMULX */
case 0x6: /* FMAX */
case 0x8: /* FMINNM */
+ case 0x9: /* FMLS */
case 0xa: /* FSUB */
case 0xe: /* FMIN */
case 0x13: /* FMUL */
@@ -11617,24 +11711,12 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
switch (fpopcode) {
- case 0x1: /* FMLA */
- read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
- gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
- fpst);
- break;
case 0x4: /* FCMEQ */
gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
break;
case 0x7: /* FRECPS */
gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0x9: /* FMLS */
- /* As usual for ARM, separate negation for fused multiply-add */
- tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
- read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
- gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
- fpst);
- break;
case 0xf: /* FRSQRTS */
gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
break;
@@ -11656,10 +11738,12 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
break;
default:
case 0x0: /* FMAXNM */
+ case 0x1: /* FMLA */
case 0x2: /* FADD */
case 0x3: /* FMULX */
case 0x6: /* FMAX */
case 0x8: /* FMINNM */
+ case 0x9: /* FMLS */
case 0xa: /* FSUB */
case 0xe: /* FMIN */
case 0x13: /* FMUL */
@@ -12880,10 +12964,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
case 0x0c: /* SQDMULH */
case 0x0d: /* SQRDMULH */
break;
- case 0x01: /* FMLA */
- case 0x05: /* FMLS */
- is_fp = 1;
- break;
case 0x1d: /* SQRDMLAH */
case 0x1f: /* SQRDMLSH */
if (!dc_isar_feature(aa64_rdm, s)) {
@@ -12950,6 +13030,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
/* is_fp, but we pass tcg_env not fp_status. */
break;
default:
+ case 0x01: /* FMLA */
+ case 0x05: /* FMLS */
case 0x09: /* FMUL */
case 0x19: /* FMULX */
unallocated_encoding(s);
@@ -12958,20 +13040,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
switch (is_fp) {
case 1: /* normal fp */
- /* convert insn encoded size to MemOp size */
- switch (size) {
- case 0: /* half-precision */
- size = MO_16;
- is_fp16 = true;
- break;
- case MO_32: /* single precision */
- case MO_64: /* double precision */
- break;
- default:
- unallocated_encoding(s);
- return;
- }
- break;
+ unallocated_encoding(s); /* in decodetree */
+ return;
case 2: /* complex fp */
/* Each indexable element is a complex pair. */
@@ -13150,38 +13220,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
if (size == 3) {
- TCGv_i64 tcg_idx = tcg_temp_new_i64();
- int pass;
-
- assert(is_fp && is_q && !is_long);
-
- read_vec_element(s, tcg_idx, rm, index, MO_64);
-
- for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
- TCGv_i64 tcg_res = tcg_temp_new_i64();
-
- read_vec_element(s, tcg_op, rn, pass, MO_64);
-
- switch (16 * u + opcode) {
- case 0x05: /* FMLS */
- /* As usual for ARM, separate negation for fused multiply-add */
- gen_vfp_negd(tcg_op, tcg_op);
- /* fall through */
- case 0x01: /* FMLA */
- read_vec_element(s, tcg_res, rd, pass, MO_64);
- gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
- break;
- default:
- case 0x09: /* FMUL */
- case 0x19: /* FMULX */
- g_assert_not_reached();
- }
-
- write_vec_element(s, tcg_res, rd, pass, MO_64);
- }
-
- clear_vec_high(s, !is_scalar, rd);
+ g_assert_not_reached();
} else if (!is_long) {
/* 32 bit floating point, or 16 or 32 bit integer.
* For the 16 bit scalar case we use the usual Neon helpers and
@@ -13237,38 +13276,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
genfn(tcg_res, tcg_op, tcg_res);
break;
}
- case 0x05: /* FMLS */
- case 0x01: /* FMLA */
- read_vec_element_i32(s, tcg_res, rd, pass,
- is_scalar ? size : MO_32);
- switch (size) {
- case 1:
- if (opcode == 0x5) {
- /* As usual for ARM, separate negation for fused
- * multiply-add */
- tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
- }
- if (is_scalar) {
- gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
- tcg_res, fpst);
- } else {
- gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
- tcg_res, fpst);
- }
- break;
- case 2:
- if (opcode == 0x5) {
- /* As usual for ARM, separate negation for
- * fused multiply-add */
- tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
- }
- gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
- tcg_res, fpst);
- break;
- default:
- g_assert_not_reached();
- }
- break;
case 0x0c: /* SQDMULH */
if (size == 1) {
gen_helper_neon_qdmulh_s16(tcg_res, tcg_env,
@@ -13310,6 +13317,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
break;
default:
+ case 0x01: /* FMLA */
+ case 0x05: /* FMLS */
case 0x09: /* FMUL */
case 0x19: /* FMULX */
g_assert_not_reached();
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 99ef676071..b925b9f21b 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -1309,6 +1309,12 @@ static float32 float32_muladd_f(float32 dest, float32 op1, float32 op2,
return float32_muladd(op1, op2, dest, 0, stat);
}
+static float64 float64_muladd_f(float64 dest, float64 op1, float64 op2,
+ float_status *stat)
+{
+ return float64_muladd(op1, op2, dest, 0, stat);
+}
+
static float16 float16_mulsub_f(float16 dest, float16 op1, float16 op2,
float_status *stat)
{
@@ -1321,6 +1327,12 @@ static float32 float32_mulsub_f(float32 dest, float32 op1, float32 op2,
return float32_muladd(float32_chs(op1), op2, dest, 0, stat);
}
+static float64 float64_mulsub_f(float64 dest, float64 op1, float64 op2,
+ float_status *stat)
+{
+ return float64_muladd(float64_chs(op1), op2, dest, 0, stat);
+}
+
#define DO_MULADD(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{ \
@@ -1340,9 +1352,11 @@ DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)
DO_MULADD(gvec_vfma_h, float16_muladd_f, float16)
DO_MULADD(gvec_vfma_s, float32_muladd_f, float32)
+DO_MULADD(gvec_vfma_d, float64_muladd_f, float64)
DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
+DO_MULADD(gvec_vfms_d, float64_mulsub_f, float64)
/* For the indexed ops, SVE applies the index per 128-bit vector segment.
* For AdvSIMD, there is of course only one such vector segment.
--
2.34.1