From: "Alex Bennée" <alex.bennee@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>
Cc: qemu-devel@nongnu.org, peter.maydell@linaro.org, qemu-arm@nongnu.org
Subject: Re: [Qemu-devel] [Qemu-arm] [PATCH v6 13/35] target/arm: Implement SVE gather loads
Date: Thu, 28 Jun 2018 14:18:55 +0100
Message-ID: <87woujuk00.fsf@linaro.org>
In-Reply-To: <20180627043328.11531-14-richard.henderson@linaro.org>

Richard Henderson <richard.henderson@linaro.org> writes:

> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
>
> ---
> v6:
> * Finish esz == msz && u==1 decode in sve.decode.
> * Remove duplicate decode in trans_ST1_zprz.
> * Add xs=2 comment.
> * Reformat tables to leave room for ff helpers.
> ---
> target/arm/helper-sve.h | 67 +++++++++++++++++++++++++
> target/arm/sve_helper.c | 77 ++++++++++++++++++++++++++++
> target/arm/translate-sve.c | 100 +++++++++++++++++++++++++++++++++++++
> target/arm/sve.decode | 57 +++++++++++++++++++++
> 4 files changed, 301 insertions(+)
>
> diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
> index 8880128f9c..aeb62afc34 100644
> --- a/target/arm/helper-sve.h
> +++ b/target/arm/helper-sve.h
> @@ -959,6 +959,73 @@ DEF_HELPER_FLAGS_4(sve_st1hd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
>
> DEF_HELPER_FLAGS_4(sve_st1sd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
>
> +DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhsu_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldssu_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhss_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhsu_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldssu_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhss_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhdu_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldsdu_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldddu_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhds_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldsds_zsu, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhdu_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldsdu_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldddu_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhds_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldsds_zss, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhdu_zd, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldsdu_zd, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldddu_zd, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldhds_zd, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +DEF_HELPER_FLAGS_6(sve_ldsds_zd, TCG_CALL_NO_WG,
> + void, env, ptr, ptr, ptr, tl, i32)
> +
> DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG,
> void, env, ptr, ptr, ptr, tl, i32)
> DEF_HELPER_FLAGS_6(sve_sths_zsu, TCG_CALL_NO_WG,
> diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
> index 7622bb2af0..24f75a32d3 100644
> --- a/target/arm/sve_helper.c
> +++ b/target/arm/sve_helper.c
> @@ -3714,6 +3714,83 @@ void HELPER(sve_st4dd_r)(CPUARMState *env, void *vg,
> }
> }
>
> +/* Loads with a vector index. */
> +
> +#define DO_LD1_ZPZ_S(NAME, TYPEI, TYPEM, FN) \
> +void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
> + target_ulong base, uint32_t desc) \
> +{ \
> + intptr_t i, oprsz = simd_oprsz(desc); \
> + unsigned scale = simd_data(desc); \
> + uintptr_t ra = GETPC(); \
> +    for (i = 0; i < oprsz; ) {                              \
> + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
> + do { \
> + TYPEM m = 0; \
> + if (pg & 1) { \
> + target_ulong off = *(TYPEI *)(vm + H1_4(i)); \
> + m = FN(env, base + (off << scale), ra); \
> + } \
> + *(uint32_t *)(vd + H1_4(i)) = m; \
> + i += 4, pg >>= 4; \
> + } while (i & 15); \
> + } \
> +}
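
The predicate walk in the 32-bit variant is a little subtle: the
predicate is consumed in 16-bit chunks, one chunk per 16 bytes of
vector, and only bit 0 of each nibble is tested. If I'm reading the
indexing right, the element at byte offset i is active iff:

    /* my paraphrase, not part of the patch */
    uint16_t pw = *(uint16_t *)(vg + H1_2((i & ~15) >> 3));
    bool active = (pw >> (i & 15)) & 1;
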
> +
> +#define DO_LD1_ZPZ_D(NAME, TYPEI, TYPEM, FN) \
> +void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
> + target_ulong base, uint32_t desc) \
> +{ \
> + intptr_t i, oprsz = simd_oprsz(desc) / 8; \
> + unsigned scale = simd_data(desc); \
> + uintptr_t ra = GETPC(); \
> + uint64_t *d = vd, *m = vm; uint8_t *pg = vg; \
> + for (i = 0; i < oprsz; i++) { \
> + TYPEM mm = 0; \
> + if (pg[H1(i)] & 1) { \
> + target_ulong off = (TYPEI)m[i]; \
> + mm = FN(env, base + (off << scale), ra); \
> + } \
> + d[i] = mm; \
> + } \
> +}
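
For anyone following along, the expansion is mechanical; e.g. the
byte-to-doubleword sign-extending form with 32-bit unsigned offsets
below comes out as (the macro spelled out by hand, modulo formatting):

    void HELPER(sve_ldbds_zsu)(CPUARMState *env, void *vd, void *vg,
                               void *vm, target_ulong base, uint32_t desc)
    {
        intptr_t i, oprsz = simd_oprsz(desc) / 8;   /* in elements */
        unsigned scale = simd_data(desc);
        uintptr_t ra = GETPC();
        uint64_t *d = vd, *m = vm;
        uint8_t *pg = vg;
        for (i = 0; i < oprsz; i++) {
            int8_t mm = 0;                     /* TYPEM = int8_t */
            if (pg[H1(i)] & 1) {
                /* TYPEI = uint32_t: low 32 bits, zero-extended */
                target_ulong off = (uint32_t)m[i];
                mm = cpu_ldub_data_ra(env, base + (off << scale), ra);
            }
            d[i] = mm;       /* int8_t -> uint64_t sign-extends */
        }
    }
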
> +
> +DO_LD1_ZPZ_S(sve_ldbsu_zsu, uint32_t, uint8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_S(sve_ldhsu_zsu, uint32_t, uint16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_S(sve_ldssu_zsu, uint32_t, uint32_t, cpu_ldl_data_ra)
> +DO_LD1_ZPZ_S(sve_ldbss_zsu, uint32_t, int8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_S(sve_ldhss_zsu, uint32_t, int16_t, cpu_lduw_data_ra)
> +
> +DO_LD1_ZPZ_S(sve_ldbsu_zss, int32_t, uint8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_S(sve_ldhsu_zss, int32_t, uint16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_S(sve_ldssu_zss, int32_t, uint32_t, cpu_ldl_data_ra)
> +DO_LD1_ZPZ_S(sve_ldbss_zss, int32_t, int8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_S(sve_ldhss_zss, int32_t, int16_t, cpu_lduw_data_ra)
> +
> +DO_LD1_ZPZ_D(sve_ldbdu_zsu, uint32_t, uint8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_D(sve_ldhdu_zsu, uint32_t, uint16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_D(sve_ldsdu_zsu, uint32_t, uint32_t, cpu_ldl_data_ra)
> +DO_LD1_ZPZ_D(sve_ldddu_zsu, uint32_t, uint64_t, cpu_ldq_data_ra)
> +DO_LD1_ZPZ_D(sve_ldbds_zsu, uint32_t, int8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_D(sve_ldhds_zsu, uint32_t, int16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_D(sve_ldsds_zsu, uint32_t, int32_t, cpu_ldl_data_ra)
> +
> +DO_LD1_ZPZ_D(sve_ldbdu_zss, int32_t, uint8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_D(sve_ldhdu_zss, int32_t, uint16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_D(sve_ldsdu_zss, int32_t, uint32_t, cpu_ldl_data_ra)
> +DO_LD1_ZPZ_D(sve_ldddu_zss, int32_t, uint64_t, cpu_ldq_data_ra)
> +DO_LD1_ZPZ_D(sve_ldbds_zss, int32_t, int8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_D(sve_ldhds_zss, int32_t, int16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_D(sve_ldsds_zss, int32_t, int32_t, cpu_ldl_data_ra)
> +
> +DO_LD1_ZPZ_D(sve_ldbdu_zd, uint64_t, uint8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_D(sve_ldhdu_zd, uint64_t, uint16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_D(sve_ldsdu_zd, uint64_t, uint32_t, cpu_ldl_data_ra)
> +DO_LD1_ZPZ_D(sve_ldddu_zd, uint64_t, uint64_t, cpu_ldq_data_ra)
> +DO_LD1_ZPZ_D(sve_ldbds_zd, uint64_t, int8_t, cpu_ldub_data_ra)
> +DO_LD1_ZPZ_D(sve_ldhds_zd, uint64_t, int16_t, cpu_lduw_data_ra)
> +DO_LD1_ZPZ_D(sve_ldsds_zd, uint64_t, int32_t, cpu_ldl_data_ra)
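
For the record, as I read the naming scheme it is
sve_ld<msz><esz><u|s>_z<offset form>, so sve_ldhds_zsu is "load
halfwords, sign-extend into doubleword elements, 32-bit unsigned
offsets in Zm", and the _zd forms take full 64-bit offsets.
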
> +
> /* Stores with a vector index. */
>
> #define DO_ST1_ZPZ_S(NAME, TYPEI, FN) \
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index 27854e0042..33ffb217d0 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -4255,6 +4255,106 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, int scale,
> tcg_temp_free_i32(desc);
> }
>
> +/* Indexed by [ff][xs][u][msz]. */
> +static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][3] = {
> + { { { gen_helper_sve_ldbss_zsu,
> + gen_helper_sve_ldhss_zsu,
> + NULL, },
> + { gen_helper_sve_ldbsu_zsu,
> + gen_helper_sve_ldhsu_zsu,
> + gen_helper_sve_ldssu_zsu, } },
> + { { gen_helper_sve_ldbss_zss,
> + gen_helper_sve_ldhss_zss,
> + NULL, },
> + { gen_helper_sve_ldbsu_zss,
> + gen_helper_sve_ldhsu_zss,
> + gen_helper_sve_ldssu_zss, } } },
> + /* TODO fill in first-fault handlers */
> +};
> +
> +/* Note that we overload xs=2 to indicate 64-bit offset. */
> +static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][3][2][4] = {
> + { { { gen_helper_sve_ldbds_zsu,
> + gen_helper_sve_ldhds_zsu,
> + gen_helper_sve_ldsds_zsu,
> + NULL, },
> + { gen_helper_sve_ldbdu_zsu,
> + gen_helper_sve_ldhdu_zsu,
> + gen_helper_sve_ldsdu_zsu,
> + gen_helper_sve_ldddu_zsu, } },
> + { { gen_helper_sve_ldbds_zss,
> + gen_helper_sve_ldhds_zss,
> + gen_helper_sve_ldsds_zss,
> + NULL, },
> + { gen_helper_sve_ldbdu_zss,
> + gen_helper_sve_ldhdu_zss,
> + gen_helper_sve_ldsdu_zss,
> + gen_helper_sve_ldddu_zss, } },
> + { { gen_helper_sve_ldbds_zd,
> + gen_helper_sve_ldhds_zd,
> + gen_helper_sve_ldsds_zd,
> + NULL, },
> + { gen_helper_sve_ldbdu_zd,
> + gen_helper_sve_ldhdu_zd,
> + gen_helper_sve_ldsdu_zd,
> + gen_helper_sve_ldddu_zd, } } },
> + /* TODO fill in first-fault handlers */
> +};
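
The NULL slots in both tables are the esz == msz sign-extending
combinations, which don't exist architecturally (sign-extending a
same-size load would be a no-op); the decode only produces u=1 for
those sizes, so the assert(fn != NULL) below should be unreachable.
The extra [ff] dimension leaves room for the first-fault helpers in
the next patch, per the TODOs.
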
> +
> +static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
> +{
> + gen_helper_gvec_mem_scatter *fn = NULL;
> +
> + if (!sve_access_check(s)) {
> + return true;
> + }
> +
> + switch (a->esz) {
> + case MO_32:
> + fn = gather_load_fn32[a->ff][a->xs][a->u][a->msz];
> + break;
> + case MO_64:
> + fn = gather_load_fn64[a->ff][a->xs][a->u][a->msz];
> + break;
> + }
> + assert(fn != NULL);
> +
> + do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
> + cpu_reg_sp(s, a->rn), fn);
> + return true;
> +}
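
Worth noting that the scale handed to do_mem_zpz is a->scale * a->msz,
so the helpers' "off << scale" shifts by msz for the scaled encodings
and by 0 otherwise - e.g. LD1H with scaled offsets shifts by 1, LD1D
by 3.
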
> +
> +static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
> +{
> + gen_helper_gvec_mem_scatter *fn = NULL;
> + TCGv_i64 imm;
> +
> + if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
> + return false;
> + }
> + if (!sve_access_check(s)) {
> + return true;
> + }
> +
> + switch (a->esz) {
> + case MO_32:
> + fn = gather_load_fn32[a->ff][0][a->u][a->msz];
> + break;
> + case MO_64:
> + fn = gather_load_fn64[a->ff][2][a->u][a->msz];
> + break;
> + }
> + assert(fn != NULL);
> +
> + /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
> + * by loading the immediate into the scalar parameter.
> + */
> + imm = tcg_const_i64(a->imm << a->msz);
> + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn);
> + tcg_temp_free_i64(imm);
> + return true;
> +}
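
Nice trick: instead of a separate code path, vector-plus-immediate
reuses the scalar-plus-vector machinery with the roles swapped. The
immediate becomes the "base" (pre-scaled by msz, the imm field being
in units of the memory element size) and zn becomes the offset vector,
hence the overloaded xs=2 (64-bit offsets) index into fn64, and the
unsigned-32-bit-offset slot for the 32-bit case.
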
> +
> static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
> {
> /* Indexed by [xs][msz]. */
> diff --git a/target/arm/sve.decode b/target/arm/sve.decode
> index 80b955ff84..45016c6042 100644
> --- a/target/arm/sve.decode
> +++ b/target/arm/sve.decode
> @@ -80,6 +80,8 @@
> &rpri_load rd pg rn imm dtype nreg
> &rprr_store rd pg rn rm msz esz nreg
> &rpri_store rd pg rn imm msz esz nreg
> +&rprr_gather_load rd pg rn rm esz msz u ff xs scale
> +&rpri_gather_load rd pg rn imm esz msz u ff
> &rprr_scatter_store rd pg rn rm esz msz xs scale
>
> ###########################################################################
> @@ -194,6 +196,22 @@
> @rpri_load_msz ....... .... . imm:s4 ... pg:3 rn:5 rd:5 \
> &rpri_load dtype=%msz_dtype
>
> +# Gather Loads.
> +@rprr_g_load_u ....... .. . . rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
> + &rprr_gather_load xs=2
> +@rprr_g_load_xs_u ....... .. xs:1 . rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
> + &rprr_gather_load
> +@rprr_g_load_xs_u_sc ....... .. xs:1 scale:1 rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
> + &rprr_gather_load
> +@rprr_g_load_xs_sc ....... .. xs:1 scale:1 rm:5 . . ff:1 pg:3 rn:5 rd:5 \
> + &rprr_gather_load
> +@rprr_g_load_u_sc ....... .. . scale:1 rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
> + &rprr_gather_load xs=2
> +@rprr_g_load_sc ....... .. . scale:1 rm:5 . . ff:1 pg:3 rn:5 rd:5 \
> + &rprr_gather_load xs=2
> +@rpri_g_load ....... msz:2 .. imm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
> + &rpri_gather_load
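
If I follow, all six formats fill in the same &rprr_gather_load
arguments; the variants missing a u or scale field are for encodings
where the value is implied, with the patterns below supplying it (u=1
for the esz == msz loads, scale=0 for the byte loads, which have
nothing to scale by), and the _u/_u_sc/_sc forms pin xs=2 for the
64-bit-offset encodings.
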
> +
> # Stores; user must fill in ESZ, MSZ, NREG as needed.
> @rprr_store ....... .. .. rm:5 ... pg:3 rn:5 rd:5 &rprr_store
> @rpri_store_msz ....... msz:2 .. . imm:s4 ... pg:3 rn:5 rd:5 &rpri_store
> @@ -759,6 +777,19 @@ LDR_zri 10000101 10 ...... 010 ... ..... ..... @rd_rn_i9
> LD1R_zpri 1000010 .. 1 imm:6 1.. pg:3 rn:5 rd:5 \
> &rpri_load dtype=%dtype_23_13 nreg=0
>
> +# SVE 32-bit gather load (scalar plus 32-bit unscaled offsets)
> +# SVE 32-bit gather load (scalar plus 32-bit scaled offsets)
> +LD1_zprz 1000010 00 .0 ..... 0.. ... ..... ..... \
> + @rprr_g_load_xs_u esz=2 msz=0 scale=0
> +LD1_zprz 1000010 01 .. ..... 0.. ... ..... ..... \
> + @rprr_g_load_xs_u_sc esz=2 msz=1
> +LD1_zprz 1000010 10 .. ..... 01. ... ..... ..... \
> + @rprr_g_load_xs_sc esz=2 msz=2 u=1
> +
> +# SVE 32-bit gather load (vector plus immediate)
> +LD1_zpiz 1000010 .. 01 ..... 1.. ... ..... ..... \
> + @rpri_g_load esz=2
> +
> ### SVE Memory Contiguous Load Group
>
> # SVE contiguous load (scalar plus scalar)
> @@ -808,6 +839,32 @@ PRF_rr 1000010 -- 00 rm:5 110 --- ----- 0 ----
>
> ### SVE Memory 64-bit Gather Group
>
> +# SVE 64-bit gather load (scalar plus 32-bit unpacked unscaled offsets)
> +# SVE 64-bit gather load (scalar plus 32-bit unpacked scaled offsets)
> +LD1_zprz 1100010 00 .0 ..... 0.. ... ..... ..... \
> + @rprr_g_load_xs_u esz=3 msz=0 scale=0
> +LD1_zprz 1100010 01 .. ..... 0.. ... ..... ..... \
> + @rprr_g_load_xs_u_sc esz=3 msz=1
> +LD1_zprz 1100010 10 .. ..... 0.. ... ..... ..... \
> + @rprr_g_load_xs_u_sc esz=3 msz=2
> +LD1_zprz 1100010 11 .. ..... 01. ... ..... ..... \
> + @rprr_g_load_xs_sc esz=3 msz=3 u=1
> +
> +# SVE 64-bit gather load (scalar plus 64-bit unscaled offsets)
> +# SVE 64-bit gather load (scalar plus 64-bit scaled offsets)
> +LD1_zprz 1100010 00 10 ..... 1.. ... ..... ..... \
> + @rprr_g_load_u esz=3 msz=0 scale=0
> +LD1_zprz 1100010 01 1. ..... 1.. ... ..... ..... \
> + @rprr_g_load_u_sc esz=3 msz=1
> +LD1_zprz 1100010 10 1. ..... 1.. ... ..... ..... \
> + @rprr_g_load_u_sc esz=3 msz=2
> +LD1_zprz 1100010 11 1. ..... 11. ... ..... ..... \
> + @rprr_g_load_sc esz=3 msz=3 u=1
> +
> +# SVE 64-bit gather load (vector plus immediate)
> +LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \
> + @rpri_g_load esz=3
> +
> # SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets)
> PRF 1100010 00 11 ----- 1-- --- ----- 0 ----
--
Alex Bennée