From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: rajav@quicinc.com, qemu-arm@nongnu.org, apazos@quicinc.com
Subject: [PATCH 24/31] target/arm: Create arm_gen_gvec_{u,s}{rshr,rsra}
Date: Thu, 26 Mar 2020 16:08:31 -0700
Message-ID: <20200326230838.31112-25-richard.henderson@linaro.org>
In-Reply-To: <20200326230838.31112-1-richard.henderson@linaro.org>

Create vectorized versions of handle_shri_with_rndacc
for shift+round and shift+round+accumulate.  Add out-of-line
helpers in preparation for longer vector lengths from SVE.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
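For reviewers, a minimal standalone C sketch of the rounding-shift
semantics that the new expanders and out-of-line helpers implement
(not part of the patch; the function names below are illustrative, and
the sketch assumes arithmetic right shift of negative values, as QEMU
does):

#include <stdint.h>
#include <stdio.h>

/*
 * Rounding shift right by SHIFT in [1..64]: conceptually
 * (x + (1 << (shift - 1))) >> shift, computed without overflow by
 * shifting one less than requested, saving the low (rounding) bit,
 * finishing the shift, and adding the rounding bit back -- the same
 * sequence used by the gen_srshr*_i64 expanders and the DO_RSHR
 * helper in this patch.
 */
static int64_t srshr64_example(int64_t x, unsigned shift)
{
    int64_t tmp = x >> (shift - 1);   /* shift one less than requested */
    return (tmp >> 1) + (tmp & 1);    /* finish the shift, then round */
}

static uint64_t urshr64_example(uint64_t x, unsigned shift)
{
    uint64_t tmp = x >> (shift - 1);
    return (tmp >> 1) + (tmp & 1);
}

int main(void)
{
    /* (-7 + 2) >> 2 rounds to -2 */
    printf("%lld\n", (long long)srshr64_example(-7, 2));
    /* shift == esize: signed always yields 0 ... */
    printf("%lld\n", (long long)srshr64_example(INT64_MIN, 64));
    /* ... unsigned yields a copy of the most significant bit */
    printf("%llu\n", (unsigned long long)urshr64_example(UINT64_MAX, 64));
    return 0;
}

The accumulating forms (SRSRA/URSRA) simply add the same rounded value
to the destination element, as DO_RSRA does.
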
 target/arm/helper.h        |  20 ++
 target/arm/translate-a64.h |   9 +
 target/arm/translate-a64.c | 458 ++++++++++++++++++++++++++++++++++++-
 target/arm/vec_helper.c    |  50 ++++
 4 files changed, 534 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index dc6a43dbd8..1ffd840f1d 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -718,6 +718,26 @@ DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index 65c0280498..7846e91e51 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -129,4 +129,13 @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
 typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                         uint32_t, uint32_t, uint32_t);
 
+void arm_gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void arm_gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void arm_gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void arm_gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
 #endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index d50207fcfb..37ee85f867 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -8561,6 +8561,453 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
     }
 }
 
+static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 ones = tcg_const_i64(dup_const(MO_8, 1));
+
+    /* Shift one less than the requested amount. */
+    if (shift > 1) {
+        tcg_gen_vec_sar8i_i64(a, a, shift - 1);
+    }
+
+    /* The low bit is the rounding bit.  Mask it off.  */
+    tcg_gen_and_i64(t, a, ones);
+
+    /* Finish the shift. */
+    tcg_gen_vec_sar8i_i64(d, a, 1);
+
+    /* Round. */
+    tcg_gen_vec_add8_i64(d, d, t);
+
+    tcg_temp_free_i64(t);
+    tcg_temp_free_i64(ones);
+}
+
+static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 ones = tcg_const_i64(dup_const(MO_16, 1));
+
+    if (shift > 1) {
+        tcg_gen_vec_sar16i_i64(a, a, shift - 1);
+    }
+    tcg_gen_and_i64(t, a, ones);
+    tcg_gen_vec_sar16i_i64(d, a, 1);
+    tcg_gen_vec_add16_i64(d, d, t);
+
+    tcg_temp_free_i64(t);
+    tcg_temp_free_i64(ones);
+}
+
+static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_sari_i32(a, a, shift - 1);
+    tcg_gen_andi_i32(t, a, 1);
+    tcg_gen_sari_i32(d, a, 1);
+    tcg_gen_add_i32(d, d, t);
+
+    tcg_temp_free_i32(t);
+}
+
+static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_sari_i64(a, a, shift - 1);
+    tcg_gen_andi_i64(t, a, 1);
+    tcg_gen_sari_i64(d, a, 1);
+    tcg_gen_add_i64(d, d, t);
+
+    tcg_temp_free_i64(t);
+}
+
+static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec ones = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_sari_vec(vece, a, a, shift - 1);
+    tcg_gen_dupi_vec(vece, ones, 1);
+    tcg_gen_and_vec(vece, t, a, ones);
+    tcg_gen_sari_vec(vece, d, a, 1);
+    tcg_gen_add_vec(vece, d, d, t);
+
+    tcg_temp_free_vec(t);
+    tcg_temp_free_vec(ones);
+}
+
+void arm_gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_srshr8_i64,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_b,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_srshr16_i64,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_h,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_srshr32_i32,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_s,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_srshr64_i64,
+          .fniv = gen_srshr_vec,
+          .fno = gen_helper_gvec_srshr_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    if (shift == (8 << vece)) {
+        /*
+         * Shifts larger than the element size are architecturally valid.
+         * Signed results in all sign bits.  With rounding, this produces
+         *   (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
+         * I.e. always zero.
+         */
+        tcg_gen_gvec_dup8i(rd_ofs, opr_sz, max_sz, 0);
+    } else {
+        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    }
+}
+
+static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    gen_srshr8_i64(t, a, shift);
+    tcg_gen_vec_add8_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    gen_srshr16_i64(t, a, shift);
+    tcg_gen_vec_add16_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+    gen_srshr32_i32(t, a, shift);
+    tcg_gen_add_i32(d, d, t);
+    tcg_temp_free_i32(t);
+}
+
+static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    gen_srshr64_i64(t, a, shift);
+    tcg_gen_add_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    gen_srshr_vec(vece, t, a, shift);
+    tcg_gen_add_vec(vece, d, d, t);
+    tcg_temp_free_vec(t);
+}
+
+void arm_gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_srsra8_i64,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_b,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_8 },
+        { .fni8 = gen_srsra16_i64,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_h,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_16 },
+        { .fni4 = gen_srsra32_i32,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_s,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_32 },
+        { .fni8 = gen_srsra64_i64,
+          .fniv = gen_srsra_vec,
+          .fno = gen_helper_gvec_srsra_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    /*
+     * Shifts larger than the element size are architecturally valid.
+     * Signed results in all sign bits.  With rounding, this produces
+     *   (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
+     * I.e. always zero.  With accumulation, this leaves D unchanged.
+     */
+    if (shift != (8 << vece)) {
+        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    } else {
+        /* Nop, but we do need to clear the tail. */
+        tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+    }
+}
+
+static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 ones = tcg_const_i64(dup_const(MO_8, 1));
+
+    /* Shift one less than the requested amount. */
+    if (shift > 1) {
+        tcg_gen_vec_shr8i_i64(a, a, shift - 1);
+    }
+
+    /* The low bit is the rounding bit.  Mask it off.  */
+    tcg_gen_and_i64(t, a, ones);
+
+    /* Finish the shift. */
+    tcg_gen_vec_shr8i_i64(d, a, 1);
+
+    /* Round. */
+    tcg_gen_vec_add8_i64(d, d, t);
+
+    tcg_temp_free_i64(t);
+    tcg_temp_free_i64(ones);
+}
+
+static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 ones = tcg_const_i64(dup_const(MO_16, 1));
+
+    if (shift > 1) {
+        tcg_gen_vec_shr16i_i64(a, a, shift - 1);
+    }
+    tcg_gen_and_i64(t, a, ones);
+    tcg_gen_vec_shr16i_i64(d, a, 1);
+    tcg_gen_vec_add16_i64(d, d, t);
+
+    tcg_temp_free_i64(t);
+    tcg_temp_free_i64(ones);
+}
+
+static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_shri_i32(a, a, shift - 1);
+    tcg_gen_andi_i32(t, a, 1);
+    tcg_gen_shri_i32(d, a, 1);
+    tcg_gen_add_i32(d, d, t);
+
+    tcg_temp_free_i32(t);
+}
+
+static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(a, a, shift - 1);
+    tcg_gen_andi_i64(t, a, 1);
+    tcg_gen_shri_i64(d, a, 1);
+    tcg_gen_add_i64(d, d, t);
+
+    tcg_temp_free_i64(t);
+}
+
+static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec ones = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_shri_vec(vece, a, a, shift - 1);
+    tcg_gen_dupi_vec(vece, ones, 1);
+    tcg_gen_and_vec(vece, t, a, ones);
+    tcg_gen_shri_vec(vece, d, a, 1);
+    tcg_gen_add_vec(vece, d, d, t);
+
+    tcg_temp_free_vec(t);
+    tcg_temp_free_vec(ones);
+}
+
+void arm_gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_urshr8_i64,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_b,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_urshr16_i64,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_h,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_urshr32_i32,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_s,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = gen_urshr64_i64,
+          .fniv = gen_urshr_vec,
+          .fno = gen_helper_gvec_urshr_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    if (shift == (8 << vece)) {
+        /*
+         * Shifts larger than the element size are architecturally valid.
+         * Unsigned results in zero.  With rounding, this produces a
+         * copy of the most significant bit.
+         */
+        tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift - 1, opr_sz, max_sz);
+    } else {
+        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+    }
+}
+
+static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    if (shift == 8) {
+        tcg_gen_vec_shr8i_i64(t, a, 7);
+    } else {
+        gen_urshr8_i64(t, a, shift);
+    }
+    tcg_gen_vec_add8_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    if (shift == 16) {
+        tcg_gen_vec_shr16i_i64(t, a, 15);
+    } else {
+        gen_urshr16_i64(t, a, shift);
+    }
+    tcg_gen_vec_add16_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    if (shift == 32) {
+        tcg_gen_shri_i32(t, a, 31);
+    } else {
+        gen_urshr32_i32(t, a, shift);
+    }
+    tcg_gen_add_i32(d, d, t);
+    tcg_temp_free_i32(t);
+}
+
+static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    if (shift == 64) {
+        tcg_gen_shri_i64(t, a, 63);
+    } else {
+        gen_urshr64_i64(t, a, shift);
+    }
+    tcg_gen_add_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    if (shift == (8 << vece)) {
+        tcg_gen_shri_vec(vece, t, a, (8 << vece) - 1);
+    } else {
+        gen_urshr_vec(vece, t, a, shift);
+    }
+    tcg_gen_add_vec(vece, d, d, t);
+    tcg_temp_free_vec(t);
+}
+
+void arm_gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+                        int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen2i ops[4] = {
+        { .fni8 = gen_ursra8_i64,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_b,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_8 },
+        { .fni8 = gen_ursra16_i64,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_h,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_16 },
+        { .fni4 = gen_ursra32_i32,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_s,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_32 },
+        { .fni8 = gen_ursra64_i64,
+          .fniv = gen_ursra_vec,
+          .fno = gen_helper_gvec_ursra_d,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .opt_opc = vecop_list,
+          .load_dest = true,
+          .vece = MO_64 },
+    };
+
+    /* tszimm encoding produces immediates in the range [1..esize] */
+    tcg_debug_assert(shift > 0);
+    tcg_debug_assert(shift <= (8 << vece));
+
+    tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+}
+
 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
 static void handle_scalar_simd_shri(DisasContext *s,
                                     bool is_u, int immh, int immb,
@@ -10712,10 +11159,15 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
         return;
 
     case 0x04: /* SRSHR / URSHR (rounding) */
-        break;
+        gen_gvec_fn2i(s, is_q, rd, rn, shift,
+                      is_u ? arm_gen_gvec_urshr : arm_gen_gvec_srshr, size);
+        return;
+
     case 0x06: /* SRSRA / URSRA (accum + rounding) */
-        accumulate = true;
-        break;
+        gen_gvec_fn2i(s, is_q, rd, rn, shift,
+                      is_u ? arm_gen_gvec_ursra : arm_gen_gvec_srsra, size);
+        return;
+
     default:
         g_assert_not_reached();
     }
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index aaaccc0a2d..c6a39c188e 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -924,6 +924,56 @@ DO_SRA(gvec_usra_d, uint64_t)
 
 #undef DO_SRA
 
+#define DO_RSHR(NAME, TYPE)                             \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
+{                                                       \
+    intptr_t i, oprsz = simd_oprsz(desc);               \
+    int shift = simd_data(desc);                        \
+    TYPE *d = vd, *n = vn;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
+        TYPE tmp = n[i] >> (shift - 1);                 \
+        d[i] = (tmp >> 1) + (tmp & 1);                  \
+    }                                                   \
+    clear_tail(d, oprsz, simd_maxsz(desc));             \
+}
+
+DO_RSHR(gvec_srshr_b, int8_t)
+DO_RSHR(gvec_srshr_h, int16_t)
+DO_RSHR(gvec_srshr_s, int32_t)
+DO_RSHR(gvec_srshr_d, int64_t)
+
+DO_RSHR(gvec_urshr_b, uint8_t)
+DO_RSHR(gvec_urshr_h, uint16_t)
+DO_RSHR(gvec_urshr_s, uint32_t)
+DO_RSHR(gvec_urshr_d, uint64_t)
+
+#undef DO_RSHR
+
+#define DO_RSRA(NAME, TYPE)                             \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
+{                                                       \
+    intptr_t i, oprsz = simd_oprsz(desc);               \
+    int shift = simd_data(desc);                        \
+    TYPE *d = vd, *n = vn;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
+        TYPE tmp = n[i] >> (shift - 1);                 \
+        d[i] += (tmp >> 1) + (tmp & 1);                 \
+    }                                                   \
+    clear_tail(d, oprsz, simd_maxsz(desc));             \
+}
+
+DO_RSRA(gvec_srsra_b, int8_t)
+DO_RSRA(gvec_srsra_h, int16_t)
+DO_RSRA(gvec_srsra_s, int32_t)
+DO_RSRA(gvec_srsra_d, int64_t)
+
+DO_RSRA(gvec_ursra_b, uint8_t)
+DO_RSRA(gvec_ursra_h, uint16_t)
+DO_RSRA(gvec_ursra_s, uint32_t)
+DO_RSRA(gvec_ursra_d, uint64_t)
+
+#undef DO_RSRA
+
 /*
  * Convert float16 to float32, raising no exceptions and
  * preserving exceptional values, including SNaN.
-- 
2.20.1




Thread overview: 44+ messages
2020-03-26 23:08 [PATCH for-5.1 00/31] target/arm: SVE2, part 1 Richard Henderson
2020-03-26 23:08 ` [PATCH 01/31] target/arm: Add ID_AA64ZFR0 fields and isar_feature_aa64_sve2 Richard Henderson
2020-04-11 18:45   ` Alex Bennée
2020-03-26 23:08 ` [PATCH 02/31] target/arm: Implement SVE2 Integer Multiply - Unpredicated Richard Henderson
2020-03-26 23:08 ` [PATCH 03/31] target/arm: Implement SVE2 integer pairwise add and accumulate long Richard Henderson
2020-03-26 23:08 ` [PATCH 04/31] target/arm: Remove fp_status from helper_{recpe, rsqrte}_u32 Richard Henderson
2020-03-26 23:08 ` [PATCH 05/31] target/arm: Implement SVE2 integer unary operations (predicated) Richard Henderson
2020-03-26 23:08 ` [PATCH 06/31] target/arm: Split out saturating/rounding shifts from neon Richard Henderson
2020-03-26 23:08 ` [PATCH 07/31] target/arm: Implement SVE2 saturating/rounding bitwise shift left (predicated) Richard Henderson
2020-03-26 23:08 ` [PATCH 08/31] target/arm: Implement SVE2 integer halving add/subtract (predicated) Richard Henderson
2020-03-26 23:08 ` [PATCH 09/31] target/arm: Implement SVE2 integer pairwise arithmetic Richard Henderson
2020-04-13 16:02   ` Laurent Desnogues
2020-03-26 23:08 ` [PATCH 10/31] target/arm: Implement SVE2 saturating add/subtract (predicated) Richard Henderson
2020-03-26 23:08 ` [PATCH 11/31] target/arm: Implement SVE2 integer add/subtract long Richard Henderson
2020-04-13 16:09   ` Laurent Desnogues
2020-03-26 23:08 ` [PATCH 12/31] target/arm: Implement SVE2 integer add/subtract interleaved long Richard Henderson
2020-03-26 23:08 ` [PATCH 13/31] target/arm: Implement SVE2 integer add/subtract wide Richard Henderson
2020-04-13 16:11   ` Laurent Desnogues
2020-03-26 23:08 ` [PATCH 14/31] target/arm: Implement SVE2 integer multiply long Richard Henderson
2020-03-26 23:08 ` [PATCH 15/31] target/arm: Implement PMULLB and PMULLT Richard Henderson
2020-03-26 23:08 ` [PATCH 16/31] target/arm: Tidy SVE tszimm shift formats Richard Henderson
2020-03-26 23:08 ` [PATCH 17/31] target/arm: Implement SVE2 bitwise shift left long Richard Henderson
2020-03-26 23:08 ` [PATCH 18/31] target/arm: Implement SVE2 bitwise exclusive-or interleaved Richard Henderson
2020-03-26 23:08 ` [PATCH 19/31] target/arm: Implement SVE2 bitwise permute Richard Henderson
2020-03-26 23:08 ` [PATCH 20/31] target/arm: Implement SVE2 complex integer add Richard Henderson
2020-04-13 16:20   ` Laurent Desnogues
2020-03-26 23:08 ` [PATCH 21/31] target/arm: Implement SVE2 integer absolute difference and accumulate long Richard Henderson
2020-04-13 16:15   ` Laurent Desnogues
2020-04-13 23:19     ` Richard Henderson
2020-04-14  7:04       ` Laurent Desnogues
2020-03-26 23:08 ` [PATCH 22/31] target/arm: Implement SVE2 integer add/subtract long with carry Richard Henderson
2020-04-13 16:18   ` Laurent Desnogues
2020-03-26 23:08 ` [PATCH 23/31] target/arm: Create arm_gen_gvec_[us]sra Richard Henderson
2020-03-26 23:08 ` Richard Henderson [this message]
2020-03-26 23:08 ` [PATCH 25/31] target/arm: Implement SVE2 bitwise shift right and accumulate Richard Henderson
2020-03-26 23:08 ` [PATCH 26/31] target/arm: Create arm_gen_gvec_{sri,sli} Richard Henderson
2020-03-26 23:08 ` [PATCH 27/31] target/arm: Tidy handle_vec_simd_shri Richard Henderson
2020-03-26 23:08 ` [PATCH 28/31] target/arm: Implement SVE2 bitwise shift and insert Richard Henderson
2020-03-26 23:08 ` [PATCH 29/31] target/arm: Vectorize SABD/UABD Richard Henderson
2020-03-26 23:08 ` [PATCH 30/31] target/arm: Vectorize SABA/UABA Richard Henderson
2020-03-26 23:08 ` [PATCH 31/31] target/arm: Implement SVE2 integer absolute difference and accumulate Richard Henderson
2020-04-01 21:17 ` [PATCH for-5.1 00/31] target/arm: SVE2, part 1 Ana Pazos
2020-04-22  2:51 ` LIU Zhiwei
2020-04-22  2:55   ` Richard Henderson
