From: "Alex Bennée" <alex.bennee@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>
Cc: qemu-devel@nongnu.org, peter.maydell@linaro.org
Subject: Re: [Qemu-devel] [PATCH v5 01/35] target/arm: Implement SVE Memory Contiguous Load Group
Date: Tue, 26 Jun 2018 10:55:38 +0100
Message-ID: <87o9fxx46d.fsf@linaro.org>
In-Reply-To: <20180621015359.12018-2-richard.henderson@linaro.org>


Richard Henderson <richard.henderson@linaro.org> writes:

> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/helper-sve.h    |  35 +++++++++
>  target/arm/sve_helper.c    | 153 +++++++++++++++++++++++++++++++++++++
>  target/arm/translate-sve.c | 121 +++++++++++++++++++++++++++++
>  target/arm/sve.decode      |  34 +++++++++
>  4 files changed, 343 insertions(+)
>
> diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
> index 2e76084992..fcc9ba5f50 100644
> --- a/target/arm/helper-sve.h
> +++ b/target/arm/helper-sve.h
> @@ -719,3 +719,38 @@ DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG,
>                     void, ptr, ptr, ptr, ptr, i32)
>  DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG,
>                     void, ptr, ptr, ptr, ptr, i32)
> +
> +DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_4(sve_ld1hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld2hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld3hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld4hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_4(sve_ld1ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld2ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld3ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld4ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_4(sve_ld1dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld2dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld3dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld4dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_4(sve_ld1hsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1hdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1hss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1hds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +
> +DEF_HELPER_FLAGS_4(sve_ld1sdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> +DEF_HELPER_FLAGS_4(sve_ld1sds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
> index 128bbf9b04..4e6ad282f9 100644
> --- a/target/arm/sve_helper.c
> +++ b/target/arm/sve_helper.c
> @@ -2810,3 +2810,156 @@ uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc)
>
>      return predtest_ones(d, oprsz, esz_mask);
>  }
> +
> +/*
> + * Load contiguous data, protected by a governing predicate.
> + */
> +#define DO_LD1(NAME, FN, TYPEE, TYPEM, H)                  \
> +static void do_##NAME(CPUARMState *env, void *vd, void *vg, \
> +                      target_ulong addr, intptr_t oprsz,   \
> +                      uintptr_t ra)                        \
> +{                                                          \
> +    intptr_t i = 0;                                        \
> +    do {                                                   \
> +        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));    \
> +        do {                                               \
> +            TYPEM m = 0;                                   \
> +            if (pg & 1) {                                  \
> +                m = FN(env, addr, ra);                     \
> +            }                                              \
> +            *(TYPEE *)(vd + H(i)) = m;                     \
> +            i += sizeof(TYPEE), pg >>= sizeof(TYPEE);      \
> +            addr += sizeof(TYPEM);                         \
> +        } while (i & 15);                                  \
> +    } while (i < oprsz);                                   \
> +}                                                          \
> +void HELPER(NAME)(CPUARMState *env, void *vg,              \
> +                  target_ulong addr, uint32_t desc)        \
> +{                                                          \
> +    do_##NAME(env, &env->vfp.zregs[simd_data(desc)], vg,   \
> +              addr, simd_oprsz(desc), GETPC());            \
> +}
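
The nested do/while loops took me a moment to follow.  If I'm reading it
right, the governing predicate is read one uint16_t at a time (one bit per
vector byte, so 16 vector bytes per chunk), and the "pg >>= sizeof(TYPEE)"
step means only every sizeof(TYPEE)'th bit is actually tested.  A
throw-away model of my own (not the patch code) of a single 16-byte chunk
with 4-byte elements:

/* My own scratch model of the DO_LD1 predicate walk, not the patch code.
 * For 4-byte elements only predicate bits 0, 4, 8 and 12 of the 16-bit
 * chunk are consulted; inactive elements are written as zero.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t zd[4] = { 0 };
    uint32_t mem[4] = { 11, 22, 33, 44 };
    uint16_t pg = 0x0011;            /* bits 0 and 4: elements 0 and 1 active */

    for (int i = 0; i < 16; i += sizeof(uint32_t), pg >>= sizeof(uint32_t)) {
        uint32_t m = 0;
        if (pg & 1) {
            m = mem[i / 4];          /* stands in for FN(env, addr, ra) */
        }
        zd[i / 4] = m;               /* inactive elements end up zeroed */
    }

    for (int e = 0; e < 4; e++) {
        printf("z[%d] = %u\n", e, zd[e]);   /* prints 11, 22, 0, 0 */
    }
    return 0;
}

That zeroing of inactive elements is what I'd expect from a predicated SVE
load, so no complaints, just noting my reading.
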
> +
> +#define DO_LD2(NAME, FN, TYPEE, TYPEM, H)                  \
> +void HELPER(NAME)(CPUARMState *env, void *vg,              \
> +                  target_ulong addr, uint32_t desc)        \
> +{                                                          \
> +    intptr_t i, oprsz = simd_oprsz(desc);                  \
> +    intptr_t ra = GETPC();                                 \
> +    unsigned rd = simd_data(desc);                         \
> +    void *d1 = &env->vfp.zregs[rd];                        \
> +    void *d2 = &env->vfp.zregs[(rd + 1) & 31];             \
> +    for (i = 0; i < oprsz; ) {                             \
> +        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));    \
> +        do {                                               \
> +            TYPEM m1 = 0, m2 = 0;                          \
> +            if (pg & 1) {                                  \
> +                m1 = FN(env, addr, ra);                    \
> +                m2 = FN(env, addr + sizeof(TYPEM), ra);    \
> +            }                                              \
> +            *(TYPEE *)(d1 + H(i)) = m1;                    \
> +            *(TYPEE *)(d2 + H(i)) = m2;                    \
> +            i += sizeof(TYPEE), pg >>= sizeof(TYPEE);      \
> +            addr += 2 * sizeof(TYPEM);                     \
> +        } while (i & 15);                                  \
> +    }                                                      \
> +}
> +
> +#define DO_LD3(NAME, FN, TYPEE, TYPEM, H)                  \
> +void HELPER(NAME)(CPUARMState *env, void *vg,              \
> +                  target_ulong addr, uint32_t desc)        \
> +{                                                          \
> +    intptr_t i, oprsz = simd_oprsz(desc);                  \
> +    intptr_t ra = GETPC();                                 \
> +    unsigned rd = simd_data(desc);                         \
> +    void *d1 = &env->vfp.zregs[rd];                        \
> +    void *d2 = &env->vfp.zregs[(rd + 1) & 31];             \
> +    void *d3 = &env->vfp.zregs[(rd + 2) & 31];             \
> +    for (i = 0; i < oprsz; ) {                             \
> +        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));    \
> +        do {                                               \
> +            TYPEM m1 = 0, m2 = 0, m3 = 0;                  \
> +            if (pg & 1) {                                  \
> +                m1 = FN(env, addr, ra);                    \
> +                m2 = FN(env, addr + sizeof(TYPEM), ra);    \
> +                m3 = FN(env, addr + 2 * sizeof(TYPEM), ra); \
> +            }                                              \
> +            *(TYPEE *)(d1 + H(i)) = m1;                    \
> +            *(TYPEE *)(d2 + H(i)) = m2;                    \
> +            *(TYPEE *)(d3 + H(i)) = m3;                    \
> +            i += sizeof(TYPEE), pg >>= sizeof(TYPEE);      \
> +            addr += 3 * sizeof(TYPEM);                     \
> +        } while (i & 15);                                  \
> +    }                                                      \
> +}
> +
> +#define DO_LD4(NAME, FN, TYPEE, TYPEM, H)                  \
> +void HELPER(NAME)(CPUARMState *env, void *vg,              \
> +                  target_ulong addr, uint32_t desc)        \
> +{                                                          \
> +    intptr_t i, oprsz = simd_oprsz(desc);                  \
> +    intptr_t ra = GETPC();                                 \
> +    unsigned rd = simd_data(desc);                         \
> +    void *d1 = &env->vfp.zregs[rd];                        \
> +    void *d2 = &env->vfp.zregs[(rd + 1) & 31];             \
> +    void *d3 = &env->vfp.zregs[(rd + 2) & 31];             \
> +    void *d4 = &env->vfp.zregs[(rd + 3) & 31];             \
> +    for (i = 0; i < oprsz; ) {                             \
> +        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));    \
> +        do {                                               \
> +            TYPEM m1 = 0, m2 = 0, m3 = 0, m4 = 0;          \
> +            if (pg & 1) {                                  \
> +                m1 = FN(env, addr, ra);                    \
> +                m2 = FN(env, addr + sizeof(TYPEM), ra);    \
> +                m3 = FN(env, addr + 2 * sizeof(TYPEM), ra); \
> +                m4 = FN(env, addr + 3 * sizeof(TYPEM), ra); \
> +            }                                              \
> +            *(TYPEE *)(d1 + H(i)) = m1;                    \
> +            *(TYPEE *)(d2 + H(i)) = m2;                    \
> +            *(TYPEE *)(d3 + H(i)) = m3;                    \
> +            *(TYPEE *)(d4 + H(i)) = m4;                    \
> +            i += sizeof(TYPEE), pg >>= sizeof(TYPEE);      \
> +            addr += 4 * sizeof(TYPEM);                     \
> +        } while (i & 15);                                  \
> +    }                                                      \
> +}
> +
> +DO_LD1(sve_ld1bhu_r, cpu_ldub_data_ra, uint16_t, uint8_t, H1_2)
> +DO_LD1(sve_ld1bhs_r, cpu_ldsb_data_ra, uint16_t, int8_t, H1_2)
> +DO_LD1(sve_ld1bsu_r, cpu_ldub_data_ra, uint32_t, uint8_t, H1_4)
> +DO_LD1(sve_ld1bss_r, cpu_ldsb_data_ra, uint32_t, int8_t, H1_4)
> +DO_LD1(sve_ld1bdu_r, cpu_ldub_data_ra, uint64_t, uint8_t, )
> +DO_LD1(sve_ld1bds_r, cpu_ldsb_data_ra, uint64_t, int8_t, )
> +
> +DO_LD1(sve_ld1hsu_r, cpu_lduw_data_ra, uint32_t, uint16_t, H1_4)
> +DO_LD1(sve_ld1hss_r, cpu_ldsw_data_ra, uint32_t, int16_t, H1_4)
> +DO_LD1(sve_ld1hdu_r, cpu_lduw_data_ra, uint64_t, uint16_t, )
> +DO_LD1(sve_ld1hds_r, cpu_ldsw_data_ra, uint64_t, int16_t, )
> +
> +DO_LD1(sve_ld1sdu_r, cpu_ldl_data_ra, uint64_t, uint32_t, )
> +DO_LD1(sve_ld1sds_r, cpu_ldl_data_ra, uint64_t, int32_t, )
> +
> +DO_LD1(sve_ld1bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
> +DO_LD2(sve_ld2bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
> +DO_LD3(sve_ld3bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
> +DO_LD4(sve_ld4bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
> +
> +DO_LD1(sve_ld1hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
> +DO_LD2(sve_ld2hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
> +DO_LD3(sve_ld3hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
> +DO_LD4(sve_ld4hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
> +
> +DO_LD1(sve_ld1ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
> +DO_LD2(sve_ld2ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
> +DO_LD3(sve_ld3ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
> +DO_LD4(sve_ld4ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
> +
> +DO_LD1(sve_ld1dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
> +DO_LD2(sve_ld2dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
> +DO_LD3(sve_ld3dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
> +DO_LD4(sve_ld4dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
> +
> +#undef DO_LD1
> +#undef DO_LD2
> +#undef DO_LD3
> +#undef DO_LD4
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index 226c97579c..3543daff48 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -42,6 +42,8 @@ typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
>  typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
>                                       TCGv_ptr, TCGv_ptr, TCGv_i32);
>
> +typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
> +
>  /*
>   * Helpers for extracting complex instruction fields.
>   */
> @@ -82,6 +84,15 @@ static inline int expand_imm_sh8u(int x)
>      return (uint8_t)x << (x & 0x100 ? 8 : 0);
>  }
>
> +/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
> + * with unsigned data.  C.f. SVE Memory Contiguous Load Group.
> + */
> +static inline int msz_dtype(int msz)
> +{
> +    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
> +    return dtype[msz];
> +}

I'm a little confused by the magic numbers in dtype[4]; do they map
directly onto dtype_mop[]? I've tried to work the mapping through below.
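
As a sanity check I knocked up a throw-away program of my own (not patch
code): copies of the two tables from translate-sve.c further down, plus
stand-in values for the TCGMemOp constants, which are my assumption rather
than the real definitions:

/* Throw-away check, my own sketch: what does msz_dtype() select from
 * dtype_mop[] and dtype_esz[]?
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the TCGMemOp values (assumed, not copied from tcg). */
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, MO_SIGN = 4 };
enum {
    MO_UB = MO_8,  MO_SB = MO_8  | MO_SIGN,
    MO_UW = MO_16, MO_SW = MO_16 | MO_SIGN,
    MO_UL = MO_32, MO_SL = MO_32 | MO_SIGN,
    MO_Q  = MO_64,
};

/* Copied from the patch. */
static const uint8_t dtype_mop[16] = {
    MO_UB, MO_UB, MO_UB, MO_UB,
    MO_SL, MO_UW, MO_UW, MO_UW,
    MO_SW, MO_SW, MO_UL, MO_UL,
    MO_SB, MO_SB, MO_SB, MO_Q
};
static const uint8_t dtype_esz[16] = {
    0, 1, 2, 3,
    3, 1, 2, 3,
    3, 2, 2, 3,
    3, 2, 1, 3
};

static int msz_dtype(int msz)
{
    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
    return dtype[msz];
}

int main(void)
{
    for (int msz = 0; msz < 4; msz++) {
        int dt = msz_dtype(msz);
        assert(!(dtype_mop[dt] & MO_SIGN));   /* unsigned ...            */
        assert((dtype_mop[dt] & 3) == msz);   /* ... memory size == msz  */
        assert(dtype_esz[dt] == msz);         /* ... element size == msz */
        printf("msz %d -> dtype %d\n", msz, dt);
    }
    return 0;
}

So 0/5/10/15 do look like the four "same size, unsigned" dtype encodings;
if that's the intent a one-line comment here would help.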

> +
>  /*
>   * Include the generated decoder.
>   */
> @@ -3526,3 +3537,113 @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a, uint32_t insn)
>      }
>      return true;
>  }
> +
> +/*
> + *** SVE Memory - Contiguous Load Group
> + */
> +
> +/* The memory mode of the dtype.  */
> +static const TCGMemOp dtype_mop[16] = {
> +    MO_UB, MO_UB, MO_UB, MO_UB,
> +    MO_SL, MO_UW, MO_UW, MO_UW,
> +    MO_SW, MO_SW, MO_UL, MO_UL,
> +    MO_SB, MO_SB, MO_SB, MO_Q
> +};
> +
> +#define dtype_msz(x)  (dtype_mop[x] & MO_SIZE)
> +
> +/* The vector element size of dtype.  */
> +static const uint8_t dtype_esz[16] = {
> +    0, 1, 2, 3,
> +    3, 1, 2, 3,
> +    3, 2, 2, 3,
> +    3, 2, 1, 3
> +};
> +
> +static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
> +                       gen_helper_gvec_mem *fn)
> +{
> +    unsigned vsz = vec_full_reg_size(s);
> +    TCGv_ptr t_pg;
> +    TCGv_i32 desc;
> +
> +    /* For e.g. LD4, there are not enough arguments to pass all 4
> +     * registers as pointers, so encode the regno into the data field.
> +     * For consistency, do this even for LD1.
> +     */
> +    desc = tcg_const_i32(simd_desc(vsz, vsz, zt));
> +    t_pg = tcg_temp_new_ptr();
> +
> +    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
> +    fn(cpu_env, t_pg, addr, desc);
> +
> +    tcg_temp_free_ptr(t_pg);
> +    tcg_temp_free_i32(desc);
> +}
> +
> +static void do_ld_zpa(DisasContext *s, int zt, int pg,
> +                      TCGv_i64 addr, int dtype, int nreg)
> +{
> +    static gen_helper_gvec_mem * const fns[16][4] = {
> +        { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
> +          gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
> +        { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
> +
> +        { gen_helper_sve_ld1sds_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1hh_r, gen_helper_sve_ld2hh_r,
> +          gen_helper_sve_ld3hh_r, gen_helper_sve_ld4hh_r },
> +        { gen_helper_sve_ld1hsu_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1hdu_r, NULL, NULL, NULL },
> +
> +        { gen_helper_sve_ld1hds_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1hss_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1ss_r, gen_helper_sve_ld2ss_r,
> +          gen_helper_sve_ld3ss_r, gen_helper_sve_ld4ss_r },
> +        { gen_helper_sve_ld1sdu_r, NULL, NULL, NULL },
> +
> +        { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
> +        { gen_helper_sve_ld1dd_r, gen_helper_sve_ld2dd_r,
> +          gen_helper_sve_ld3dd_r, gen_helper_sve_ld4dd_r },
> +    };
> +    gen_helper_gvec_mem *fn = fns[dtype][nreg];
> +
> +    /* While there are holes in the table, they are not
> +     * accessible via the instruction encoding.
> +     */
> +    assert(fn != NULL);
> +    do_mem_zpa(s, zt, pg, addr, fn);
> +}
> +
> +static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
> +{
> +    if (a->rm == 31) {
> +        return false;
> +    }
> +    if (sve_access_check(s)) {
> +        TCGv_i64 addr = new_tmp_a64(s);
> +        tcg_gen_muli_i64(addr, cpu_reg(s, a->rm),
> +                         (a->nreg + 1) << dtype_msz(a->dtype));
> +        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
> +        do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
> +    }
> +    return true;
> +}
> +
> +static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
> +{
> +    if (sve_access_check(s)) {
> +        int vsz = vec_full_reg_size(s);
> +        int elements = vsz >> dtype_esz[a->dtype];
> +        TCGv_i64 addr = new_tmp_a64(s);
> +
> +        tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
> +                         (a->imm * elements * (a->nreg + 1))
> +                         << dtype_msz(a->dtype));
> +        do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
> +    }
> +    return true;
> +}
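
Also, the immediate scaling in trans_LD_zpri took me a second; here is
scratch arithmetic of my own (not patch code) for an LD2H with the decoded
imm:s4 field equal to -1, assuming a 256-bit vector length:

/* Scratch arithmetic only: the offset trans_LD_zpri would compute for
 * LD2H with imm field = -1 and a 256-bit vector.
 */
#include <stdio.h>

int main(void)
{
    int vsz = 32;          /* vec_full_reg_size(): 256-bit VL in bytes     */
    int nreg = 1;          /* LD2: nreg field is 1, i.e. two registers     */
    int esz = 1, msz = 1;  /* dtype_esz[5] and dtype_msz(5) for LD2H       */
    int imm = -1;          /* the decoded imm:s4 field                     */

    int elements = vsz >> esz;                 /* 16 halfwords per Z reg   */
    int unit = (elements * (nreg + 1)) << msz; /* 64 bytes per step of imm */
    int offset = imm * unit;                   /* -64 bytes                */

    printf("elements=%d unit=%d offset=%d bytes\n", elements, unit, offset);
    return 0;
}

i.e. each step of the immediate field advances by the full two-register
transfer size (2 * VL bytes here), which looks right to me.
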
> diff --git a/target/arm/sve.decode b/target/arm/sve.decode
> index 6f436f9096..cfb12da639 100644
> --- a/target/arm/sve.decode
> +++ b/target/arm/sve.decode
> @@ -45,6 +45,9 @@
>  # Unsigned 8-bit immediate, optionally shifted left by 8.
>  %sh8_i8u        5:9 !function=expand_imm_sh8u
>
> +# Unsigned load of msz into esz=2, represented as a dtype.
> +%msz_dtype      23:2 !function=msz_dtype
> +
>  # Either a copy of rd (at bit 0), or a different source
>  # as propagated via the MOVPRFX instruction.
>  %reg_movprfx    0:5
> @@ -71,6 +74,8 @@
>  &incdec2_cnt    rd rn pat esz imm d u
>  &incdec_pred    rd pg esz d u
>  &incdec2_pred   rd rn pg esz d u
> +&rprr_load      rd pg rn rm dtype nreg
> +&rpri_load      rd pg rn imm dtype nreg
>
>  ###########################################################################
>  # Named instruction formats.  These are generally used to
> @@ -170,6 +175,15 @@
>  @incdec2_pred   ........ esz:2 .... .. ..... .. pg:4 rd:5 \
>                  &incdec2_pred rn=%reg_movprfx
>
> +# Loads; user must fill in NREG.
> +@rprr_load_dt   ....... dtype:4 rm:5 ... pg:3 rn:5 rd:5         &rprr_load
> +@rpri_load_dt   ....... dtype:4 . imm:s4 ... pg:3 rn:5 rd:5     &rpri_load
> +
> +@rprr_load_msz  ....... .... rm:5 ... pg:3 rn:5 rd:5 \
> +                &rprr_load dtype=%msz_dtype
> +@rpri_load_msz  ....... .... . imm:s4 ... pg:3 rn:5 rd:5 \
> +                &rpri_load dtype=%msz_dtype
> +
>  ###########################################################################
>  # Instruction patterns.  Grouped according to the SVE encodingindex.xhtml.
>
> @@ -665,3 +679,23 @@ LDR_pri         10000101 10 ...... 000 ... ..... 0 ....         @pd_rn_i9
>
>  # SVE load vector register
>  LDR_zri         10000101 10 ...... 010 ... ..... .....          @rd_rn_i9
> +
> +### SVE Memory Contiguous Load Group
> +

> +# SVE contiguous load (scalar plus scalar)
> +LD_zprr         1010010 .... ..... 010 ... ..... .....    @rprr_load_dt nreg=0
> +
> +# SVE contiguous load (scalar plus immediate)
> +LD_zpri         1010010 .... 0.... 101 ... ..... .....    @rpri_load_dt nreg=0
> +
> +# SVE contiguous non-temporal load (scalar plus scalar)
> +# LDNT1B, LDNT1H, LDNT1W, LDNT1D
> +# SVE load multiple structures (scalar plus scalar)
> +# LD2B, LD2H, LD2W, LD2D; etc.
> +LD_zprr         1010010 .. nreg:2 ..... 110 ... ..... .....     @rprr_load_msz
> +
> +# SVE contiguous non-temporal load (scalar plus immediate)
> +# LDNT1B, LDNT1H, LDNT1W, LDNT1D
> +# SVE load multiple structures (scalar plus immediate)
> +# LD2B, LD2H, LD2W, LD2D; etc.
> +LD_zpri         1010010 .. nreg:2 0.... 111 ... ..... .....     @rpri_load_msz

Otherwise:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

--
Alex Bennée
