From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH v2 38/67] target/arm: Convert SUQADD and USQADD to gvec
Date: Fri, 24 May 2024 16:20:52 -0700
Message-ID: <20240524232121.284515-39-richard.henderson@linaro.org>
In-Reply-To: <20240524232121.284515-1-richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
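For reference, the per-element operation being vectorized is roughly the
following (scalar C sketch with illustrative names, byte elements shown;
the qc flag stands in for FPSR.QC):

#include <stdint.h>
#include <stdbool.h>

/* SUQADD: signed accumulator plus unsigned addend, signed saturation. */
static int8_t suqadd8_ref(int8_t rd, uint8_t rn, bool *qc)
{
    int r = rd + rn;            /* exact in a wider type */
    if (r > INT8_MAX) {         /* only upward saturation is possible */
        r = INT8_MAX;
        *qc = true;             /* saturation is reported via FPSR.QC */
    }
    return r;
}

/* USQADD: unsigned accumulator plus signed addend, unsigned saturation. */
static uint8_t usqadd8_ref(uint8_t rd, int8_t rn, bool *qc)
{
    int r = rd + rn;
    if (r < 0) {
        r = 0;
        *qc = true;
    } else if (r > UINT8_MAX) {
        r = UINT8_MAX;
        *qc = true;
    }
    return r;
}

The out-of-line helpers below do the same thing per element in a wider
type; the inline vector expansions avoid the widening step.
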
 target/arm/helper.h            |  16 +++++
 target/arm/tcg/translate-a64.h |   6 ++
 target/arm/tcg/gengvec64.c     | 106 +++++++++++++++++++++++++++++++
 target/arm/tcg/translate-a64.c | 113 ++++++++++++++-------------------
 target/arm/tcg/vec_helper.c    |  64 +++++++++++++++++++
 5 files changed, 241 insertions(+), 64 deletions(-)
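
The SUQADD expansion in gengvec64.c cannot widen per element, so it clamps
the unsigned addend before the add; a scalar model of that trick for byte
elements (illustrative only, not how the vector code is literally written):

#include <stdint.h>
#include <stdbool.h>

/* Scalar model of gen_suqadd_vec (byte elements). */
static int8_t suqadd8_model(int8_t a, uint8_t b, bool *qc)
{
    uint8_t room = INT8_MAX - a;        /* max addend that cannot overflow */
    uint8_t u = b < room ? b : room;    /* umin: clamp the addend */
    if (u != b) {                       /* addend was clamped -> saturated */
        *qc = true;
    }
    return a + u;                       /* plain add can no longer overflow */
}

The USQADD expansion instead computes both usadd(a, b) and ussub(a, -b) and
selects on the sign of b, setting QC when the selected result differs from
the wrapping sum.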

diff --git a/target/arm/helper.h b/target/arm/helper.h
index f830531dd3..de2c5c9aef 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -836,6 +836,22 @@ DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index 91750f0ca9..b5cb26f8a2 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -197,6 +197,12 @@ void gen_gvec_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                    uint32_t a, uint32_t oprsz, uint32_t maxsz);
 void gen_gvec_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                    uint32_t a, uint32_t oprsz, uint32_t maxsz);
+void gen_gvec_suqadd_qc(unsigned vece, uint32_t rd_ofs,
+                        uint32_t rn_ofs, uint32_t rm_ofs,
+                        uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs,
+                        uint32_t rn_ofs, uint32_t rm_ofs,
+                        uint32_t opr_sz, uint32_t max_sz);
 
 void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
 void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
diff --git a/target/arm/tcg/gengvec64.c b/target/arm/tcg/gengvec64.c
index 093b498b13..4b76e476a0 100644
--- a/target/arm/tcg/gengvec64.c
+++ b/target/arm/tcg/gengvec64.c
@@ -188,3 +188,109 @@ void gen_gvec_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
     tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
 }
 
+static void gen_suqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
+                           TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec max =
+        tcg_constant_vec_matching(t, vece, (1ull << ((8 << vece) - 1)) - 1);
+    TCGv_vec u = tcg_temp_new_vec_matching(t);
+
+    /* Maximum value that can be added to @a without overflow. */
+    tcg_gen_sub_vec(vece, u, max, a);
+
+    /* Constrain addend so that the next addition never overflows. */
+    tcg_gen_umin_vec(vece, u, u, b);
+    tcg_gen_add_vec(vece, t, u, a);
+
+    /* Compute QC by comparing the adjusted @b. */
+    tcg_gen_xor_vec(vece, u, u, b);
+    tcg_gen_or_vec(vece, qc, qc, u);
+}
+
+void gen_gvec_suqadd_qc(unsigned vece, uint32_t rd_ofs,
+                        uint32_t rn_ofs, uint32_t rm_ofs,
+                        uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_add_vec, INDEX_op_sub_vec, INDEX_op_umin_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_suqadd_vec,
+          .fno = gen_helper_gvec_suqadd_b,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_8 },
+        { .fniv = gen_suqadd_vec,
+          .fno = gen_helper_gvec_suqadd_h,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_16 },
+        { .fniv = gen_suqadd_vec,
+          .fno = gen_helper_gvec_suqadd_s,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_32 },
+        { .fniv = gen_suqadd_vec,
+          .fno = gen_helper_gvec_suqadd_d,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_64 },
+    };
+    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void gen_usqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
+                           TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec u = tcg_temp_new_vec_matching(t);
+    TCGv_vec z = tcg_constant_vec_matching(t, vece, 0);
+
+    /* Compute unsigned saturation of add for +b and sub for -b. */
+    tcg_gen_neg_vec(vece, t, b);
+    tcg_gen_usadd_vec(vece, u, a, b);
+    tcg_gen_ussub_vec(vece, t, a, t);
+
+    /* Select the correct result depending on the sign of b. */
+    tcg_gen_cmpsel_vec(TCG_COND_LT, vece, t, b, z, t, u);
+
+    /* Compute QC by comparing against the non-saturated result. */
+    tcg_gen_add_vec(vece, u, a, b);
+    tcg_gen_xor_vec(vece, u, u, t);
+    tcg_gen_or_vec(vece, qc, qc, u);
+}
+
+void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs,
+                        uint32_t rn_ofs, uint32_t rm_ofs,
+                        uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_neg_vec, INDEX_op_add_vec,
+        INDEX_op_usadd_vec, INDEX_op_ussub_vec,
+        INDEX_op_cmpsel_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_usqadd_vec,
+          .fno = gen_helper_gvec_usqadd_b,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_8 },
+        { .fniv = gen_usqadd_vec,
+          .fno = gen_helper_gvec_usqadd_h,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_16 },
+        { .fniv = gen_usqadd_vec,
+          .fno = gen_helper_gvec_usqadd_s,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_32 },
+        { .fniv = gen_usqadd_vec,
+          .fno = gen_helper_gvec_usqadd_d,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_64 },
+    };
+    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 9167e4d0bd..9f948e033e 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -9983,83 +9983,68 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
 
 /* Remaining saturating accumulating ops */
 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
-                                bool is_q, int size, int rn, int rd)
+                                bool is_q, unsigned size, int rn, int rd)
 {
-    bool is_double = (size == 3);
+    if (!is_scalar) {
+        gen_gvec_fn3(s, is_q, rd, rd, rn,
+                     is_u ? gen_gvec_usqadd_qc : gen_gvec_suqadd_qc, size);
+        return;
+    }
 
-    if (is_double) {
+    if (size == 3) {
         TCGv_i64 tcg_rn = tcg_temp_new_i64();
         TCGv_i64 tcg_rd = tcg_temp_new_i64();
-        int pass;
 
-        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
-            read_vec_element(s, tcg_rn, rn, pass, MO_64);
-            read_vec_element(s, tcg_rd, rd, pass, MO_64);
+        read_vec_element(s, tcg_rn, rn, 0, MO_64);
+        read_vec_element(s, tcg_rd, rd, 0, MO_64);
 
-            if (is_u) { /* USQADD */
-                gen_helper_neon_uqadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-            } else { /* SUQADD */
-                gen_helper_neon_sqadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-            }
-            write_vec_element(s, tcg_rd, rd, pass, MO_64);
+        if (is_u) { /* USQADD */
+            gen_helper_neon_uqadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
+        } else { /* SUQADD */
+            gen_helper_neon_sqadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
         }
-        clear_vec_high(s, !is_scalar, rd);
+        write_vec_element(s, tcg_rd, rd, 0, MO_64);
+        clear_vec_high(s, false, rd);
     } else {
         TCGv_i32 tcg_rn = tcg_temp_new_i32();
         TCGv_i32 tcg_rd = tcg_temp_new_i32();
-        int pass, maxpasses;
 
-        if (is_scalar) {
-            maxpasses = 1;
-        } else {
-            maxpasses = is_q ? 4 : 2;
+        read_vec_element_i32(s, tcg_rn, rn, 0, size);
+        read_vec_element_i32(s, tcg_rd, rd, 0, size);
+
+        if (is_u) { /* USQADD */
+            switch (size) {
+            case 0:
+                gen_helper_neon_uqadd_s8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
+                break;
+            case 1:
+                gen_helper_neon_uqadd_s16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
+                break;
+            case 2:
+                gen_helper_neon_uqadd_s32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        } else { /* SUQADD */
+            switch (size) {
+            case 0:
+                gen_helper_neon_sqadd_u8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
+                break;
+            case 1:
+                gen_helper_neon_sqadd_u16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
+                break;
+            case 2:
+                gen_helper_neon_sqadd_u32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
+                break;
+            default:
+                g_assert_not_reached();
+            }
         }
 
-        for (pass = 0; pass < maxpasses; pass++) {
-            if (is_scalar) {
-                read_vec_element_i32(s, tcg_rn, rn, pass, size);
-                read_vec_element_i32(s, tcg_rd, rd, pass, size);
-            } else {
-                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
-                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
-            }
-
-            if (is_u) { /* USQADD */
-                switch (size) {
-                case 0:
-                    gen_helper_neon_uqadd_s8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-                    break;
-                case 1:
-                    gen_helper_neon_uqadd_s16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-                    break;
-                case 2:
-                    gen_helper_neon_uqadd_s32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-            } else { /* SUQADD */
-                switch (size) {
-                case 0:
-                    gen_helper_neon_sqadd_u8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-                    break;
-                case 1:
-                    gen_helper_neon_sqadd_u16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-                    break;
-                case 2:
-                    gen_helper_neon_sqadd_u32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-            }
-
-            if (is_scalar) {
-                write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
-            }
-            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
-        }
-        clear_vec_high(s, is_q, rd);
+        write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
+        write_vec_element_i32(s, tcg_rd, rd, 0, MO_32);
+        clear_vec_high(s, false, rd);
     }
 }
 
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 56fea14edb..d8e96386be 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -1555,6 +1555,14 @@ DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
 DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
 DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)
 
+DO_SAT(gvec_usqadd_b, int, uint8_t, int8_t, +, 0, UINT8_MAX)
+DO_SAT(gvec_usqadd_h, int, uint16_t, int16_t, +, 0, UINT16_MAX)
+DO_SAT(gvec_usqadd_s, int64_t, uint32_t, int32_t, +, 0, UINT32_MAX)
+
+DO_SAT(gvec_suqadd_b, int, int8_t, uint8_t, +, INT8_MIN, INT8_MAX)
+DO_SAT(gvec_suqadd_h, int, int16_t, uint16_t, +, INT16_MIN, INT16_MAX)
+DO_SAT(gvec_suqadd_s, int64_t, int32_t, uint32_t, +, INT32_MIN, INT32_MAX)
+
 #undef DO_SAT
 
 void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
@@ -1645,6 +1653,62 @@ void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
     clear_tail(d, oprsz, simd_maxsz(desc));
 }
 
+void HELPER(gvec_usqadd_d)(void *vd, void *vq, void *vn,
+                           void *vm, uint32_t desc)
+{
+    intptr_t i, oprsz = simd_oprsz(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+    bool q = false;
+
+    for (i = 0; i < oprsz / 8; i++) {
+        uint64_t nn = n[i];
+        int64_t mm = m[i];
+        uint64_t dd = nn + mm;
+
+        if (mm < 0) {
+            if (nn < (uint64_t)-mm) {
+                dd = 0;
+                q = true;
+            }
+        } else {
+            if (dd < nn) {
+                dd = UINT64_MAX;
+                q = true;
+            }
+        }
+        d[i] = dd;
+    }
+    if (q) {
+        uint32_t *qc = vq;
+        qc[0] = 1;
+    }
+    clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_suqadd_d)(void *vd, void *vq, void *vn,
+                           void *vm, uint32_t desc)
+{
+    intptr_t i, oprsz = simd_oprsz(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+    bool q = false;
+
+    for (i = 0; i < oprsz / 8; i++) {
+        int64_t nn = n[i];
+        uint64_t mm = m[i];
+        int64_t dd = nn + mm;
+
+        if (mm > (uint64_t)(INT64_MAX - nn)) {
+            dd = INT64_MAX;
+            q = true;
+        }
+        d[i] = dd;
+    }
+    if (q) {
+        uint32_t *qc = vq;
+        qc[0] = 1;
+    }
+    clear_tail(d, oprsz, simd_maxsz(desc));
+}
 
 #define DO_SRA(NAME, TYPE)                              \
 void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
-- 
2.34.1



Thread overview: 114+ messages
2024-05-24 23:20 [PATCH v2 00/67] target/arm: Convert a64 advsimd to decodetree (part 1) Richard Henderson
2024-05-24 23:20 ` [PATCH v2 01/67] target/arm: Add neoverse-n1 to qemu-arm (DO NOT MERGE) Richard Henderson
2024-05-24 23:20 ` [PATCH v2 02/67] target/arm: Use PLD, PLDW, PLI not NOP for t32 Richard Henderson
2024-05-28 13:13   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 03/67] target/arm: Reject incorrect operands to PLD, PLDW, PLI Richard Henderson
2024-05-28 13:18   ` Peter Maydell
2024-05-28 17:36     ` Richard Henderson
2024-05-29 13:32       ` Peter Maydell
2024-05-29 17:39         ` Richard Henderson
2024-05-24 23:20 ` [PATCH v2 04/67] target/arm: Zero-extend writeback for fp16 FCVTZS (scalar, integer) Richard Henderson
2024-05-28 12:48   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 05/67] target/arm: Fix decode of FMOV (hp) vs MOVI Richard Henderson
2024-05-28 12:34   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 06/67] target/arm: Verify sz=0 for Advanced SIMD scalar pairwise (fp16) Richard Henderson
2024-05-28 12:25   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 07/67] target/arm: Split out gengvec.c Richard Henderson
2024-05-24 23:20 ` [PATCH v2 08/67] target/arm: Split out gengvec64.c Richard Henderson
2024-05-24 23:20 ` [PATCH v2 09/67] target/arm: Convert Cryptographic AES to decodetree Richard Henderson
2024-05-24 23:20 ` [PATCH v2 10/67] target/arm: Convert Cryptographic 3-register SHA " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 11/67] target/arm: Convert Cryptographic 2-register " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 12/67] target/arm: Convert Cryptographic 3-register SHA512 " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 13/67] target/arm: Convert Cryptographic 2-register " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 14/67] target/arm: Convert Cryptographic 4-register " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 15/67] target/arm: Convert Cryptographic 3-register, imm2 " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 16/67] target/arm: Convert XAR " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 17/67] target/arm: Convert Advanced SIMD copy " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 18/67] target/arm: Convert FMULX " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 19/67] target/arm: Convert FADD, FSUB, FDIV, FMUL " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 20/67] target/arm: Convert FMAX, FMIN, FMAXNM, FMINNM " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 21/67] target/arm: Introduce vfp_load_reg16 Richard Henderson
2024-05-28 12:22   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 22/67] target/arm: Expand vfp neg and abs inline Richard Henderson
2024-05-24 23:20 ` [PATCH v2 23/67] target/arm: Convert FNMUL to decodetree Richard Henderson
2024-05-24 23:20 ` [PATCH v2 24/67] target/arm: Convert FMLA, FMLS " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 25/67] target/arm: Convert FCMEQ, FCMGE, FCMGT, FACGE, FACGT " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 26/67] target/arm: Convert FABD " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 27/67] target/arm: Convert FRECPS, FRSQRTS " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 28/67] target/arm: Convert FADDP " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 29/67] target/arm: Convert FMAXP, FMINP, FMAXNMP, FMINNMP " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 30/67] target/arm: Use gvec for neon faddp, fmaxp, fminp Richard Henderson
2024-05-24 23:20 ` [PATCH v2 31/67] target/arm: Convert ADDP to decodetree Richard Henderson
2024-05-24 23:20 ` [PATCH v2 32/67] target/arm: Use gvec for neon padd Richard Henderson
2024-05-24 23:20 ` [PATCH v2 33/67] target/arm: Convert SMAXP, SMINP, UMAXP, UMINP to decodetree Richard Henderson
2024-05-24 23:20 ` [PATCH v2 34/67] target/arm: Use gvec for neon pmax, pmin Richard Henderson
2024-05-24 23:20 ` [PATCH v2 35/67] target/arm: Convert FMLAL, FMLSL to decodetree Richard Henderson
2024-05-24 23:20 ` [PATCH v2 36/67] target/arm: Convert disas_simd_3same_logic " Richard Henderson
2024-05-24 23:20 ` [PATCH v2 37/67] target/arm: Improve vector UQADD, UQSUB, SQADD, SQSUB Richard Henderson
2024-05-28 15:24   ` Peter Maydell
2024-05-24 23:20 ` Richard Henderson [this message]
2024-05-28 15:37   ` [PATCH v2 38/67] target/arm: Convert SUQADD and USQADD to gvec Peter Maydell
2024-05-28 17:41     ` Richard Henderson
2024-05-24 23:20 ` [PATCH v2 39/67] target/arm: Inline scalar SUQADD and USQADD Richard Henderson
2024-05-28 15:40   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 40/67] target/arm: Inline scalar SQADD, UQADD, SQSUB, UQSUB Richard Henderson
2024-05-28 15:42   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 41/67] target/arm: Convert SQADD, SQSUB, UQADD, UQSUB to decodetree Richard Henderson
2024-05-28 15:44   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 42/67] target/arm: Convert SUQADD, USQADD " Richard Henderson
2024-05-28 15:44   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 43/67] target/arm: Convert SSHL, USHL " Richard Henderson
2024-05-28 15:46   ` Peter Maydell
2024-05-24 23:20 ` [PATCH v2 44/67] target/arm: Convert SRSHL and URSHL (register) to gvec Richard Henderson
2024-05-28 15:51   ` Peter Maydell
2024-05-28 17:30     ` Richard Henderson
2024-05-24 23:20 ` [PATCH v2 45/67] target/arm: Convert SRSHL, URSHL to decodetree Richard Henderson
2024-05-28 15:51   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 46/67] target/arm: Convert SQSHL and UQSHL (register) to gvec Richard Henderson
2024-05-28 15:53   ` Peter Maydell
2024-05-28 17:31     ` Richard Henderson
2024-05-24 23:21 ` [PATCH v2 47/67] target/arm: Convert SQSHL, UQSHL to decodetree Richard Henderson
2024-05-28 15:53   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 48/67] target/arm: Convert SQRSHL and UQRSHL (register) to gvec Richard Henderson
2024-05-28 15:54   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 49/67] target/arm: Convert SQRSHL, UQRSHL to decodetree Richard Henderson
2024-05-28 15:55   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 50/67] target/arm: Convert ADD, SUB (vector) " Richard Henderson
2024-05-28 15:56   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 51/67] target/arm: Convert CMGT, CMHI, CMGE, CMHS, CMTST, CMEQ " Richard Henderson
2024-05-28 15:57   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 52/67] target/arm: Use TCG_COND_TSTNE in gen_cmtst_{i32, i64} Richard Henderson
2024-05-28 15:58   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 53/67] target/arm: Use TCG_COND_TSTNE in gen_cmtst_vec Richard Henderson
2024-05-28 15:59   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 54/67] target/arm: Convert SHADD, UHADD to gvec Richard Henderson
2024-05-28 16:01   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 55/67] target/arm: Convert SHADD, UHADD to decodetree Richard Henderson
2024-05-28 16:01   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 56/67] target/arm: Convert SHSUB, UHSUB to gvec Richard Henderson
2024-05-28 16:02   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 57/67] target/arm: Convert SHSUB, UHSUB to decodetree Richard Henderson
2024-05-28 16:02   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 58/67] target/arm: Convert SRHADD, URHADD to gvec Richard Henderson
2024-05-28 16:03   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 59/67] target/arm: Convert SRHADD, URHADD to decodetree Richard Henderson
2024-05-28 16:04   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 60/67] target/arm: Convert SMAX, SMIN, UMAX, UMIN " Richard Henderson
2024-05-28 16:04   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 61/67] target/arm: Convert SABA, SABD, UABA, UABD " Richard Henderson
2024-05-28 16:05   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 62/67] target/arm: Convert MUL, PMUL " Richard Henderson
2024-05-28 16:06   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 63/67] target/arm: Convert MLA, MLS " Richard Henderson
2024-05-28 16:07   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 64/67] target/arm: Tidy SQDMULH, SQRDMULH (vector) Richard Henderson
2024-05-28 16:08   ` Peter Maydell
2024-05-24 23:21 ` [PATCH v2 65/67] target/arm: Convert SQDMULH, SQRDMULH to decodetree Richard Henderson
2024-05-28 16:10   ` Peter Maydell
2024-05-28 17:27     ` Richard Henderson
2024-05-24 23:21 ` [PATCH v2 66/67] target/arm: Convert FMADD, FMSUB, FNMADD, FNMSUB " Richard Henderson
2024-05-28 16:15   ` Peter Maydell
2024-05-28 17:31     ` Richard Henderson
2024-05-24 23:21 ` [PATCH v2 67/67] target/arm: Convert FCSEL " Richard Henderson
2024-05-28 16:16   ` Peter Maydell
2024-05-28 16:20 ` [PATCH v2 00/67] target/arm: Convert a64 advsimd to decodetree (part 1) Peter Maydell
