From: "Alex Bennée" <alex.bennee@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>
Cc: qemu-devel@nongnu.org, peter.maydell@linaro.org, qemu-arm@nongnu.org
Subject: Re: [Qemu-devel] [Qemu-arm] [PATCH v6 33/35] target/arm: Implement SVE dot product (indexed)
Date: Thu, 28 Jun 2018 15:04:57 +0100
Message-ID: <87sh57uhva.fsf@linaro.org>
In-Reply-To: <20180627043328.11531-34-richard.henderson@linaro.org>
Richard Henderson <richard.henderson@linaro.org> writes:
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>
> ---
> v6: Rearrange the loops. The compiler does well with this form
> and hopefully they are also easier to read.
> ---
> target/arm/helper.h | 5 ++
> target/arm/translate-sve.c | 18 ++++++
> target/arm/vec_helper.c | 124 +++++++++++++++++++++++++++++++++++++
> target/arm/sve.decode | 8 ++-
> 4 files changed, 154 insertions(+), 1 deletion(-)
>
> diff --git a/target/arm/helper.h b/target/arm/helper.h
> index e23ce7ff19..59e8c3bd1b 100644
> --- a/target/arm/helper.h
> +++ b/target/arm/helper.h
> @@ -588,6 +588,11 @@ DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
>
> +DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +
> DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
> void, ptr, ptr, ptr, ptr, i32)
> DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index 8a2bd1f8c5..3cff71cae8 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -3440,6 +3440,24 @@ static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a, uint32_t insn)
> return true;
> }
>
> +static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a, uint32_t insn)
> +{
> + static gen_helper_gvec_3 * const fns[2][2] = {
> + { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h },
> + { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h }
> + };
> +
> + if (sve_access_check(s)) {
> + unsigned vsz = vec_full_reg_size(s);
> + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
> + vec_full_reg_offset(s, a->rn),
> + vec_full_reg_offset(s, a->rm),
> + vsz, vsz, a->index, fns[a->u][a->sz]);
> + }
> + return true;
> +}
> +
> +
> /*
> *** SVE Floating Point Multiply-Add Indexed Group
> */
> diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
> index c16a30c3b5..37f338732e 100644
> --- a/target/arm/vec_helper.c
> +++ b/target/arm/vec_helper.c
> @@ -261,6 +261,130 @@ void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
> clear_tail(d, opr_sz, simd_maxsz(desc));
> }
>
> +void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> + intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
> + intptr_t index = simd_data(desc);
> + uint32_t *d = vd;
> + int8_t *n = vn;
> + int8_t *m_indexed = (int8_t *)vm + index * 4;
> +
> + /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
> + * Otherwise opr_sz is a multiple of 16.
> + */
> + segend = MIN(4, opr_sz_4);
> + i = 0;
> + do {
> + int8_t m0 = m_indexed[i * 4 + 0];
> + int8_t m1 = m_indexed[i * 4 + 1];
> + int8_t m2 = m_indexed[i * 4 + 2];
> + int8_t m3 = m_indexed[i * 4 + 3];
> +
> + do {
> + d[i] += n[i * 4 + 0] * m0
> + + n[i * 4 + 1] * m1
> + + n[i * 4 + 2] * m2
> + + n[i * 4 + 3] * m3;
> + } while (++i < segend);
> + segend = i + 4;
> + } while (i < opr_sz_4);
> +
> + clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
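For my own sanity I cross-checked the segmented indexing against a
naive per-element model along these lines (my naming, purely
illustrative, not from the patch):
  #include <stdint.h>
  /* Each 32-bit element of d accumulates the dot product of 4 bytes
   * of n with the single 32-bit group selected by `index` within the
   * same 128-bit segment of m.
   */
  static void ref_sdot_idx_b(uint32_t *d, const int8_t *n,
                             const int8_t *m, int index, int oprsz)
  {
      for (int e = 0; e < oprsz / 4; e++) {
          const int8_t *ms = m + (e / 4) * 16 + index * 4;
          int32_t sum = 0;
          for (int b = 0; b < 4; b++) {
              sum += n[e * 4 + b] * ms[b];
          }
          d[e] += sum;
      }
  }
which, as far as I can tell, computes the same thing as the unrolled
do/while above, including the opr_sz == 8 AdvSIMD case.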
> +
> +void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> + intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
> + intptr_t index = simd_data(desc);
> + uint32_t *d = vd;
> + uint8_t *n = vn;
> + uint8_t *m_indexed = (uint8_t *)vm + index * 4;
> +
> + /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
> + * Otherwise opr_sz is a multiple of 16.
> + */
> + segend = MIN(4, opr_sz_4);
> + i = 0;
> + do {
> + uint8_t m0 = m_indexed[i * 4 + 0];
> + uint8_t m1 = m_indexed[i * 4 + 1];
> + uint8_t m2 = m_indexed[i * 4 + 2];
> + uint8_t m3 = m_indexed[i * 4 + 3];
> +
> + do {
> + d[i] += n[i * 4 + 0] * m0
> + + n[i * 4 + 1] * m1
> + + n[i * 4 + 2] * m2
> + + n[i * 4 + 3] * m3;
> + } while (++i < segend);
> + segend = i + 4;
> + } while (i < opr_sz_4);
> +
> + clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
> +
> +void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> + intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
> + intptr_t index = simd_data(desc);
> + uint64_t *d = vd;
> + int16_t *n = vn;
> + int16_t *m_indexed = (int16_t *)vm + index * 4;
> +
> + /* This is supported by SVE only, so opr_sz is always a multiple of 16.
> + * Process the entire segment all at once, writing back the results
> + * only after we've consumed all of the inputs.
> + */
> + for (i = 0; i < opr_sz_8 ; i += 2) {
> + uint64_t d0, d1;
> +
> + d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
> + d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
> + d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
> + d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
> + d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
> + d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
> + d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
> + d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
> +
> + d[i + 0] += d0;
> + d[i + 1] += d1;
> + }
Looking at the disassembler output, I guess the compiler's cost
metrics don't make it worthwhile to vectorise any of this, which is a
shame.
Anyway:
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> +
> + clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
> +
> +void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> + intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
> + intptr_t index = simd_data(desc);
> + uint64_t *d = vd;
> + uint16_t *n = vn;
> + uint16_t *m_indexed = (uint16_t *)vm + index * 4;
> +
> + /* This is supported by SVE only, so opr_sz is always a multiple of 16.
> + * Process the entire segment all at once, writing back the results
> + * only after we've consumed all of the inputs.
> + */
> + for (i = 0; i < opr_sz_8 ; i += 2) {
> + uint64_t d0, d1;
> +
> + d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
> + d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
> + d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
> + d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
> + d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
> + d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
> + d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
> + d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
> +
> + d[i + 0] += d0;
> + d[i + 1] += d1;
> + }
> +
> + clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
> +
> void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
> void *vfpst, uint32_t desc)
> {
> diff --git a/target/arm/sve.decode b/target/arm/sve.decode
> index 35415bfb6c..e10b689454 100644
> --- a/target/arm/sve.decode
> +++ b/target/arm/sve.decode
> @@ -726,7 +726,13 @@ UMIN_zzi 00100101 .. 101 011 110 ........ ..... @rdn_i8u
> MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s
>
> # SVE integer dot product (unpredicated)
> -DOT_zzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5
> +DOT_zzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 ra=%reg_movprfx
> +
> +# SVE integer dot product (indexed)
> +DOT_zzx 01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \
> + sz=0 ra=%reg_movprfx
> +DOT_zzx 01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \
> + sz=1 ra=%reg_movprfx
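If I'm reading the two patterns right, the 32-bit form spends its
bits on a 2-bit index plus a 3-bit Zm, while the 64-bit form gets a
1-bit index plus a 4-bit Zm, hence the two DOT_zzx lines. A
hand-assembled example I used to convince myself (so treat with
suspicion):
  sz=0, u=0, index=2, rm=5, rn=3, rd=7
  => 01000100 101 10 101 00000 0 00011 00111 = 0x44b50067
  => sdot z7.s, z3.b, z5.b[2]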
>
> # SVE floating-point complex add (predicated)
> FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \
--
Alex Bennée