From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org
Subject: [PATCH v2 08/15] target/arm: Wrap vector cmtst/ushl/sshl GVecGen3 in GVecGen3Fn
Date: Sat, 2 May 2020 15:44:56 -0700
Message-ID: <20200502224503.2282-9-richard.henderson@linaro.org>
In-Reply-To: <20200502224503.2282-1-richard.henderson@linaro.org>

Provide a functional interface for the vector expansion.
This fits better with the existing set of helpers that
we provide for other operations.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate.h | 10 ++-
target/arm/translate-a64.c | 18 ++---
target/arm/translate.c | 159 ++++++++++++++++++++-----------------
3 files changed, 101 insertions(+), 86 deletions(-)

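For reviewers unfamiliar with the GVecGen3Fn-style helpers, here is a minimal
stand-alone sketch of the before/after calling convention.  It is not part of
the patch: the types and numbers are toy stand-ins rather than the real
TCG/GVecGen3 definitions; only the shape of the interfaces is meant to match
the diff below.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    const char *name;   /* stands in for the .fni4/.fniv/.fno expanders */
    unsigned vece;      /* element size log2: 0=8b, 1=16b, 2=32b, 3=64b */
} GVecGen3;

/* Toy version of the generic expander that consumes an op descriptor. */
static void tcg_gen_gvec_3(uint32_t dofs, uint32_t nofs, uint32_t mofs,
                           uint32_t oprsz, uint32_t maxsz,
                           const GVecGen3 *op)
{
    printf("expand %s: vece=%u dofs=%u nofs=%u mofs=%u oprsz=%u maxsz=%u\n",
           op->name, op->vece, (unsigned)dofs, (unsigned)nofs,
           (unsigned)mofs, (unsigned)oprsz, (unsigned)maxsz);
}

/* Old style: the descriptor table is exported and indexed by every caller. */
static const GVecGen3 cmtst_op[4] = {
    { "cmtst_b", 0 }, { "cmtst_h", 1 }, { "cmtst_s", 2 }, { "cmtst_d", 3 },
};

/* New style: a GVecGen3Fn-shaped wrapper hides the table, so callers see
 * the same signature as gen_gvec_mla()/gen_gvec_mls() and friends. */
static void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                           uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &cmtst_op[vece]);
}

int main(void)
{
    tcg_gen_gvec_3(0, 16, 32, 16, 16, &cmtst_op[2]);   /* before: pick op by hand */
    gen_gvec_cmtst(2, 0, 16, 32, 16, 16);              /* after: vece selects it   */
    return 0;
}
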
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 4fbcdf1294..b3e47e7a7f 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -291,9 +291,13 @@ void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
-extern const GVecGen3 cmtst_op[4];
-extern const GVecGen3 sshl_op[4];
-extern const GVecGen3 ushl_op[4];
+void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
extern const GVecGen4 uqadd_op[4];
extern const GVecGen4 sqadd_op[4];
extern const GVecGen4 uqsub_op[4];
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 2b5ae4d43a..2be6ab541e 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -594,15 +594,6 @@ static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
is_q ? 16 : 8, vec_full_reg_size(s));
}
-/* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */
-static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
- int rn, int rm, const GVecGen3 *gvec_op)
-{
- tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm), is_q ? 16 : 8,
- vec_full_reg_size(s), gvec_op);
-}
-
/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
int rn, int rm, int data, gen_helper_gvec_3 *fn)
@@ -11210,8 +11201,11 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
(u ? uqsub_op : sqsub_op) + size);
return;
case 0x08: /* SSHL, USHL */
- gen_gvec_op3(s, is_q, rd, rn, rm,
- u ? &ushl_op[size] : &sshl_op[size]);
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
+ }
return;
case 0x0c: /* SMAX, UMAX */
if (u) {
@@ -11250,7 +11244,7 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
return;
case 0x11:
if (!u) { /* CMTST */
- gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
return;
}
/* else CMEQ */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index da807242ff..e5aa78c88a 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -4878,27 +4878,31 @@ static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}
-static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
-
-const GVecGen3 cmtst_op[4] = {
- { .fni4 = gen_helper_neon_tst_u8,
- .fniv = gen_cmtst_vec,
- .opt_opc = vecop_list_cmtst,
- .vece = MO_8 },
- { .fni4 = gen_helper_neon_tst_u16,
- .fniv = gen_cmtst_vec,
- .opt_opc = vecop_list_cmtst,
- .vece = MO_16 },
- { .fni4 = gen_cmtst_i32,
- .fniv = gen_cmtst_vec,
- .opt_opc = vecop_list_cmtst,
- .vece = MO_32 },
- { .fni8 = gen_cmtst_i64,
- .fniv = gen_cmtst_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .opt_opc = vecop_list_cmtst,
- .vece = MO_64 },
-};
+void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_cmp_vec, 0 };
+ static const GVecGen3 ops[4] = {
+ { .fni4 = gen_helper_neon_tst_u8,
+ .fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni4 = gen_helper_neon_tst_u16,
+ .fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_cmtst_i32,
+ .fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_cmtst_i64,
+ .fniv = gen_cmtst_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
{
@@ -5016,29 +5020,33 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
tcg_temp_free_vec(rsh);
}
-static const TCGOpcode ushl_list[] = {
- INDEX_op_neg_vec, INDEX_op_shlv_vec,
- INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
-};
-
-const GVecGen3 ushl_op[4] = {
- { .fniv = gen_ushl_vec,
- .fno = gen_helper_gvec_ushl_b,
- .opt_opc = ushl_list,
- .vece = MO_8 },
- { .fniv = gen_ushl_vec,
- .fno = gen_helper_gvec_ushl_h,
- .opt_opc = ushl_list,
- .vece = MO_16 },
- { .fni4 = gen_ushl_i32,
- .fniv = gen_ushl_vec,
- .opt_opc = ushl_list,
- .vece = MO_32 },
- { .fni8 = gen_ushl_i64,
- .fniv = gen_ushl_vec,
- .opt_opc = ushl_list,
- .vece = MO_64 },
-};
+void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_neg_vec, INDEX_op_shlv_vec,
+ INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_ushl_vec,
+ .fno = gen_helper_gvec_ushl_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = gen_ushl_vec,
+ .fno = gen_helper_gvec_ushl_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_ushl_i32,
+ .fniv = gen_ushl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_ushl_i64,
+ .fniv = gen_ushl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
{
@@ -5150,29 +5158,33 @@ static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
tcg_temp_free_vec(tmp);
}
-static const TCGOpcode sshl_list[] = {
- INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
- INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
-};
-
-const GVecGen3 sshl_op[4] = {
- { .fniv = gen_sshl_vec,
- .fno = gen_helper_gvec_sshl_b,
- .opt_opc = sshl_list,
- .vece = MO_8 },
- { .fniv = gen_sshl_vec,
- .fno = gen_helper_gvec_sshl_h,
- .opt_opc = sshl_list,
- .vece = MO_16 },
- { .fni4 = gen_sshl_i32,
- .fniv = gen_sshl_vec,
- .opt_opc = sshl_list,
- .vece = MO_32 },
- { .fni8 = gen_sshl_i64,
- .fniv = gen_sshl_vec,
- .opt_opc = sshl_list,
- .vece = MO_64 },
-};
+void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
+ INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_sshl_vec,
+ .fno = gen_helper_gvec_sshl_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = gen_sshl_vec,
+ .fno = gen_helper_gvec_sshl_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_sshl_i32,
+ .fniv = gen_sshl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_sshl_i64,
+ .fniv = gen_sshl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
TCGv_vec a, TCGv_vec b)
@@ -5548,8 +5560,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
vec_size, vec_size);
} else { /* VTST */
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
- vec_size, vec_size, &cmtst_op[size]);
+ gen_gvec_cmtst(size, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
}
return 0;
@@ -5584,8 +5596,13 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case NEON_3R_VSHL:
/* Note the operation is vshl vd,vm,vn */
- tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
- u ? &ushl_op[size] : &sshl_op[size]);
+ if (u) {
+ gen_gvec_ushl(size, rd_ofs, rm_ofs, rn_ofs,
+ vec_size, vec_size);
+ } else {
+ gen_gvec_sshl(size, rd_ofs, rm_ofs, rn_ofs,
+ vec_size, vec_size);
+ }
return 0;
}
--
2.20.1