From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH 44/67] target/arm: Introduce gen_gvec_{s,u}{add,ada}lp
Date: Sun,  1 Dec 2024 09:05:43 -0600	[thread overview]
Message-ID: <20241201150607.12812-45-richard.henderson@linaro.org> (raw)
In-Reply-To: <20241201150607.12812-1-richard.henderson@linaro.org>

Introduce gen_gvec_{s,u}addlp and gen_gvec_{s,u}adalp, gvec expanders
for pairwise long addition with and without accumulation, and use them
for the Neon VPADDL and VPADAL instructions in place of
do_2misc_pairwise and the neon_paddl_u16/u32 helpers.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
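(Review notes below the --- line; nothing from here down is intended
for the commit log.)

As a sanity check on the semantics, here is a minimal, self-contained C
model of what the new signed expansion computes at element size MO_16
(adjacent signed bytes summed into 16-bit lanes).  It is illustrative
only and not part of the patch; the ref_saddlp_s8 name and the test
vector are invented for this note.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t ref_saddlp_s8(uint64_t n)
{
    uint64_t d = 0;
    int i;

    for (i = 0; i < 4; i++) {
        /* Pair of adjacent signed bytes within one 16-bit lane. */
        int8_t e0 = (int8_t)(n >> (16 * i));
        int8_t e1 = (int8_t)(n >> (16 * i + 8));
        /* Widen and add; the 16-bit result cannot overflow. */
        d |= (uint64_t)(uint16_t)(int16_t)(e0 + e1) << (16 * i);
    }
    return d;
}

int main(void)
{
    /* 16-bit lanes, low to high: 0x0201, 0x807f, 0xffff, 0xff00 */
    uint64_t n = 0xff00ffff807f0201ull;

    /* Expect fffffffeffff0003: 1+2, 127-128, -1-1, 0-1 per lane. */
    printf("%016" PRIx64 "\n", ref_saddlp_s8(n));
    return 0;
}

gen_saddlp_vec below arrives at the same per-lane result with two
arithmetic shifts and an add, which is why only sari/shli/add appear in
its vecop_list.
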
 target/arm/helper.h             |   2 -
 target/arm/tcg/translate.h      |   9 ++
 target/arm/tcg/gengvec.c        | 230 ++++++++++++++++++++++++++++++++
 target/arm/tcg/neon_helper.c    |  22 ---
 target/arm/tcg/translate-neon.c | 150 +--------------------
 5 files changed, 243 insertions(+), 170 deletions(-)
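
In the same spirit, a scalar model of the unsigned form (mask and
shift, as in gen_uaddlp_b_i64) and of the accumulating form, which adds
the pairwise sums into the existing 16-bit lanes of the destination (as
gen_uadalp_b_i64 does via tcg_gen_vec_add16_i64).  Again illustrative
only; the ref_* names and test values are invented for this note.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t ref_uaddlp_u8(uint64_t n)
{
    const uint64_t m = 0x00ff00ff00ff00ffull;  /* dup_const(MO_16, 0xff) */
    uint64_t lo = n & m;                       /* even bytes, zero-extended */
    uint64_t hi = (n >> 8) & m;                /* odd bytes, zero-extended */

    /* Per-lane sums are at most 0x1fe, so no carry crosses a 16-bit lane. */
    return lo + hi;
}

static uint64_t ref_uadalp_u8(uint64_t d, uint64_t n)
{
    uint64_t t = ref_uaddlp_u8(n);
    uint64_t r = 0;
    int i;

    /* Accumulate: lane-wise 16-bit add of the pairwise sums into d. */
    for (i = 0; i < 4; i++) {
        uint16_t a = (uint16_t)(d >> (16 * i));
        uint16_t b = (uint16_t)(t >> (16 * i));
        r |= (uint64_t)(uint16_t)(a + b) << (16 * i);
    }
    return r;
}

int main(void)
{
    uint64_t n = 0xff00ffff807f0201ull;

    /* Expect 00ff01fe00ff0003 for the plain pairwise add. */
    printf("%016" PRIx64 "\n", ref_uaddlp_u8(n));
    printf("%016" PRIx64 "\n", ref_uadalp_u8(0x0001000100010001ull, n));
    return 0;
}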

diff --git a/target/arm/helper.h b/target/arm/helper.h
index 57e0ce387b..6369d07d05 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -397,8 +397,6 @@ DEF_HELPER_1(neon_widen_s16, i64, i32)
 
 DEF_HELPER_2(neon_addl_u16, i64, i64, i64)
 DEF_HELPER_2(neon_addl_u32, i64, i64, i64)
-DEF_HELPER_2(neon_paddl_u16, i64, i64, i64)
-DEF_HELPER_2(neon_paddl_u32, i64, i64, i64)
 DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
 DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
 DEF_HELPER_2(neon_subl_u16, i64, i64, i64)
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 342ebedafc..edd775d564 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -593,6 +593,15 @@ void gen_gvec_rev32(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
 void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t opr_sz, uint32_t max_sz);
 
+void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz);
+
 /*
  * Forward to the isar_feature_* tests given a DisasContext pointer.
  */
diff --git a/target/arm/tcg/gengvec.c b/target/arm/tcg/gengvec.c
index 33c0a94958..2755da8ac7 100644
--- a/target/arm/tcg/gengvec.c
+++ b/target/arm/tcg/gengvec.c
@@ -2467,3 +2467,233 @@ void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
         g_assert_not_reached();
     }
 }
+
+static void gen_saddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    int half = 4 << vece;
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_shli_vec(vece, t, n, half);
+    tcg_gen_sari_vec(vece, d, n, half);
+    tcg_gen_sari_vec(vece, t, t, half);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_saddlp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_ext32s_i64(t, n);
+    tcg_gen_sari_i64(d, n, 32);
+    tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2 g[] = {
+        { .fniv = gen_saddlp_vec,
+          .fni8 = gen_helper_neon_addlp_s8,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fniv = gen_saddlp_vec,
+          .fni8 = gen_helper_neon_addlp_s16,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fniv = gen_saddlp_vec,
+          .fni8 = gen_saddlp_s_i64,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_sadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    gen_saddlp_vec(vece, t, n);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_sadalp_b_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    gen_helper_neon_addlp_s8(t, n);
+    tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_sadalp_h_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    gen_helper_neon_addlp_s16(t, n);
+    tcg_gen_vec_add32_i64(d, d, t);
+}
+
+static void gen_sadalp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    gen_saddlp_s_i64(t, n);
+    tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2 g[] = {
+        { .fniv = gen_sadalp_vec,
+          .fni8 = gen_sadalp_b_i64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_16 },
+        { .fniv = gen_sadalp_vec,
+          .fni8 = gen_sadalp_h_i64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_32 },
+        { .fniv = gen_sadalp_vec,
+          .fni8 = gen_sadalp_s_i64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_64 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_uaddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    int half = 4 << vece;
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_constant_vec_matching(d, vece, MAKE_64BIT_MASK(0, half));
+
+    tcg_gen_shri_vec(vece, t, n, half);
+    tcg_gen_and_vec(vece, d, n, m);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_uaddlp_b_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0xff));
+
+    tcg_gen_shri_i64(t, n, 8);
+    tcg_gen_and_i64(d, n, m);
+    tcg_gen_and_i64(t, t, m);
+    /* No carry between widened unsigned elements. */
+    tcg_gen_add_i64(d, d, t);
+}
+
+static void gen_uaddlp_h_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_32, 0xffff));
+
+    tcg_gen_shri_i64(t, n, 16);
+    tcg_gen_and_i64(d, n, m);
+    tcg_gen_and_i64(t, t, m);
+    /* No carry between widened unsigned elements. */
+    tcg_gen_add_i64(d, d, t);
+}
+
+static void gen_uaddlp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_ext32u_i64(t, n);
+    tcg_gen_shri_i64(d, n, 32);
+    tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2 g[] = {
+        { .fniv = gen_uaddlp_vec,
+          .fni8 = gen_uaddlp_b_i64,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fniv = gen_uaddlp_vec,
+          .fni8 = gen_uaddlp_h_i64,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fniv = gen_uaddlp_vec,
+          .fni8 = gen_uaddlp_s_i64,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_uadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    gen_uaddlp_vec(vece, t, n);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+static void gen_uadalp_b_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    gen_uaddlp_b_i64(t, n);
+    tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_uadalp_h_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    gen_uaddlp_h_i64(t, n);
+    tcg_gen_vec_add32_i64(d, d, t);
+}
+
+static void gen_uadalp_s_i64(TCGv_i64 d, TCGv_i64 n)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    gen_uaddlp_s_i64(t, n);
+    tcg_gen_add_i64(d, d, t);
+}
+
+void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2 g[] = {
+        { .fniv = gen_uadalp_vec,
+          .fni8 = gen_uadalp_b_i64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fniv = gen_uadalp_vec,
+          .fni8 = gen_uadalp_h_i64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fniv = gen_uadalp_vec,
+          .fni8 = gen_uadalp_s_i64,
+          .load_dest = true,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]);
+}
diff --git a/target/arm/tcg/neon_helper.c b/target/arm/tcg/neon_helper.c
index b92ddd4914..1a22857b5e 100644
--- a/target/arm/tcg/neon_helper.c
+++ b/target/arm/tcg/neon_helper.c
@@ -844,28 +844,6 @@ uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
     return (a + b) ^ mask;
 }
 
-uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
-{
-    uint64_t tmp;
-    uint64_t tmp2;
-
-    tmp = a & 0x0000ffff0000ffffull;
-    tmp += (a >> 16) & 0x0000ffff0000ffffull;
-    tmp2 = b & 0xffff0000ffff0000ull;
-    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
-    return    ( tmp         & 0xffff)
-            | ((tmp  >> 16) & 0xffff0000ull)
-            | ((tmp2 << 16) & 0xffff00000000ull)
-            | ( tmp2        & 0xffff000000000000ull);
-}
-
-uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
-{
-    uint32_t low = a + (a >> 32);
-    uint32_t high = b + (b >> 32);
-    return low + ((uint64_t)high << 32);
-}
-
 /* Pairwise long add: add pairs of adjacent elements into
  * double-width elements in the result (eg _s8 is an 8x8->16 op)
  */
diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c
index ca6f5578b4..19a18018f1 100644
--- a/target/arm/tcg/translate-neon.c
+++ b/target/arm/tcg/translate-neon.c
@@ -2565,152 +2565,6 @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
     return true;
 }
 
-static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
-                              NeonGenWidenFn *widenfn,
-                              NeonGenTwo64OpFn *opfn,
-                              NeonGenTwo64OpFn *accfn)
-{
-    /*
-     * Pairwise long operations: widen both halves of the pair,
-     * combine the pairs with the opfn, and then possibly accumulate
-     * into the destination with the accfn.
-     */
-    int pass;
-
-    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
-        return false;
-    }
-
-    /* UNDEF accesses to D16-D31 if they don't exist. */
-    if (!dc_isar_feature(aa32_simd_r32, s) &&
-        ((a->vd | a->vm) & 0x10)) {
-        return false;
-    }
-
-    if ((a->vd | a->vm) & a->q) {
-        return false;
-    }
-
-    if (!widenfn) {
-        return false;
-    }
-
-    if (!vfp_access_check(s)) {
-        return true;
-    }
-
-    for (pass = 0; pass < a->q + 1; pass++) {
-        TCGv_i32 tmp;
-        TCGv_i64 rm0_64, rm1_64, rd_64;
-
-        rm0_64 = tcg_temp_new_i64();
-        rm1_64 = tcg_temp_new_i64();
-        rd_64 = tcg_temp_new_i64();
-
-        tmp = tcg_temp_new_i32();
-        read_neon_element32(tmp, a->vm, pass * 2, MO_32);
-        widenfn(rm0_64, tmp);
-        read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
-        widenfn(rm1_64, tmp);
-
-        opfn(rd_64, rm0_64, rm1_64);
-
-        if (accfn) {
-            TCGv_i64 tmp64 = tcg_temp_new_i64();
-            read_neon_element64(tmp64, a->vd, pass, MO_64);
-            accfn(rd_64, tmp64, rd_64);
-        }
-        write_neon_element64(rd_64, a->vd, pass, MO_64);
-    }
-    return true;
-}
-
-static bool trans_VPADDL_S(DisasContext *s, arg_2misc *a)
-{
-    static NeonGenWidenFn * const widenfn[] = {
-        gen_helper_neon_widen_s8,
-        gen_helper_neon_widen_s16,
-        tcg_gen_ext_i32_i64,
-        NULL,
-    };
-    static NeonGenTwo64OpFn * const opfn[] = {
-        gen_helper_neon_paddl_u16,
-        gen_helper_neon_paddl_u32,
-        tcg_gen_add_i64,
-        NULL,
-    };
-
-    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
-}
-
-static bool trans_VPADDL_U(DisasContext *s, arg_2misc *a)
-{
-    static NeonGenWidenFn * const widenfn[] = {
-        gen_helper_neon_widen_u8,
-        gen_helper_neon_widen_u16,
-        tcg_gen_extu_i32_i64,
-        NULL,
-    };
-    static NeonGenTwo64OpFn * const opfn[] = {
-        gen_helper_neon_paddl_u16,
-        gen_helper_neon_paddl_u32,
-        tcg_gen_add_i64,
-        NULL,
-    };
-
-    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
-}
-
-static bool trans_VPADAL_S(DisasContext *s, arg_2misc *a)
-{
-    static NeonGenWidenFn * const widenfn[] = {
-        gen_helper_neon_widen_s8,
-        gen_helper_neon_widen_s16,
-        tcg_gen_ext_i32_i64,
-        NULL,
-    };
-    static NeonGenTwo64OpFn * const opfn[] = {
-        gen_helper_neon_paddl_u16,
-        gen_helper_neon_paddl_u32,
-        tcg_gen_add_i64,
-        NULL,
-    };
-    static NeonGenTwo64OpFn * const accfn[] = {
-        gen_helper_neon_addl_u16,
-        gen_helper_neon_addl_u32,
-        tcg_gen_add_i64,
-        NULL,
-    };
-
-    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
-                             accfn[a->size]);
-}
-
-static bool trans_VPADAL_U(DisasContext *s, arg_2misc *a)
-{
-    static NeonGenWidenFn * const widenfn[] = {
-        gen_helper_neon_widen_u8,
-        gen_helper_neon_widen_u16,
-        tcg_gen_extu_i32_i64,
-        NULL,
-    };
-    static NeonGenTwo64OpFn * const opfn[] = {
-        gen_helper_neon_paddl_u16,
-        gen_helper_neon_paddl_u32,
-        tcg_gen_add_i64,
-        NULL,
-    };
-    static NeonGenTwo64OpFn * const accfn[] = {
-        gen_helper_neon_addl_u16,
-        gen_helper_neon_addl_u32,
-        tcg_gen_add_i64,
-        NULL,
-    };
-
-    return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
-                             accfn[a->size]);
-}
-
 typedef void ZipFn(TCGv_ptr, TCGv_ptr);
 
 static bool do_zip_uzp(DisasContext *s, arg_2misc *a,
@@ -3071,6 +2925,10 @@ DO_2MISC_VEC(VCLT0, gen_gvec_clt0)
 DO_2MISC_VEC(VCLS, gen_gvec_cls)
 DO_2MISC_VEC(VCLZ, gen_gvec_clz)
 DO_2MISC_VEC(VREV64, gen_gvec_rev64)
+DO_2MISC_VEC(VPADDL_S, gen_gvec_saddlp)
+DO_2MISC_VEC(VPADDL_U, gen_gvec_uaddlp)
+DO_2MISC_VEC(VPADAL_S, gen_gvec_sadalp)
+DO_2MISC_VEC(VPADAL_U, gen_gvec_uadalp)
 
 static bool trans_VMVN(DisasContext *s, arg_2misc *a)
 {
-- 
2.43.0


