From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>, qemu-arm@nongnu.org
Subject: [PATCH v7 32/92] target/arm: Implement SVE2 bitwise ternary operations
Date: Mon, 24 May 2021 18:02:58 -0700
Message-ID: <20210525010358.152808-33-richard.henderson@linaro.org>
In-Reply-To: <20210525010358.152808-1-richard.henderson@linaro.org>
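Implement the six SVE2 bitwise ternary instructions EOR3, BCAX, BSL, BSL1N,
BSL2N and NBSL. For quick reference, a summary of the per-lane operation
implemented for each insn below, using the d/n/m/k operand names of the
helpers (BSL itself is expanded inline via gvec_bitsel rather than an
out-of-line helper):

    EOR3:   d = n ^ m ^ k
    BCAX:   d = n ^ (m & ~k)
    BSL:    d = (n & k) | (m & ~k)
    BSL1N:  d = (~n & k) | (m & ~k)
    BSL2N:  d = (n & k) | (~m & ~k)
    NBSL:   d = ~((n & k) | (m & ~k))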
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper-sve.h    |   6 ++
 target/arm/sve.decode      |  12 +++
 target/arm/sve_helper.c    |  50 +++++++++
 target/arm/translate-sve.c | 213 +++++++++++++++++++++++++++++++++++++
 4 files changed, 281 insertions(+)
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 5bf9fdc7a3..df617e3351 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2543,3 +2543,9 @@ DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_eor3, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bcax, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bsl1n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bsl2n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_nbsl, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index f365907518..bf673e2f16 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -124,6 +124,10 @@
@rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \
&rrrr_esz ra=%reg_movprfx
+# Four operand with unused vector element size
+@rdn_ra_rm_e0 ........ ... rm:5 ... ... ra:5 rd:5 \
+ &rrrr_esz esz=0 rn=%reg_movprfx
+
# Three operand with "memory" size, aka immediate left shift
@rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri
@@ -379,6 +383,14 @@ ORR_zzz 00000100 01 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
EOR_zzz 00000100 10 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
BIC_zzz 00000100 11 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
+# SVE2 bitwise ternary operations
+EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
+BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
+BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+
### SVE Index Generation Group
# SVE index generation (immediate start, immediate increment)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 7450977630..17889bc316 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -6797,3 +6797,53 @@ DO_ST1_ZPZ_D(dd_be, zd, MO_64)
#undef DO_ST1_ZPZ_S
#undef DO_ST1_ZPZ_D
+
+void HELPER(sve2_eor3)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = n[i] ^ m[i] ^ k[i];
+    }
+}
+
+void HELPER(sve2_bcax)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = n[i] ^ (m[i] & ~k[i]);
+    }
+}
+
+void HELPER(sve2_bsl1n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = (~n[i] & k[i]) | (m[i] & ~k[i]);
+    }
+}
+
+void HELPER(sve2_bsl2n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = (n[i] & k[i]) | (~m[i] & ~k[i]);
+    }
+}
+
+void HELPER(sve2_nbsl)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = ~((n[i] & k[i]) | (m[i] & ~k[i]));
+    }
+}
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 64aecc2db4..093424fd27 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -217,6 +217,17 @@ static void gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
vec_full_reg_offset(s, rm), vsz, vsz);
}
+/* Invoke a vector expander on four Zregs. */
+static void gen_gvec_fn_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
+                             int esz, int rd, int rn, int rm, int ra)
+{
+    unsigned vsz = vec_full_reg_size(s);
+    gvec_fn(esz, vec_full_reg_offset(s, rd),
+            vec_full_reg_offset(s, rn),
+            vec_full_reg_offset(s, rm),
+            vec_full_reg_offset(s, ra), vsz, vsz);
+}
+
/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
@@ -329,6 +340,208 @@ static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
return do_zzz_fn(s, a, tcg_gen_gvec_andc);
}
+static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_fn_zzzz(s, fn, a->esz, a->rd, a->rn, a->rm, a->ra);
+    }
+    return true;
+}
+
+static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+    tcg_gen_xor_i64(d, n, m);
+    tcg_gen_xor_i64(d, d, k);
+}
+
+static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                         TCGv_vec m, TCGv_vec k)
+{
+    tcg_gen_xor_vec(vece, d, n, m);
+    tcg_gen_xor_vec(vece, d, d, k);
+}
+
+static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen4 op = {
+        .fni8 = gen_eor3_i64,
+        .fniv = gen_eor3_vec,
+        .fno = gen_helper_sve2_eor3,
+        .vece = MO_64,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_EOR3(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_zzzz_fn(s, a, gen_eor3);
+}
+
+static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+    tcg_gen_andc_i64(d, m, k);
+    tcg_gen_xor_i64(d, d, n);
+}
+
+static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                         TCGv_vec m, TCGv_vec k)
+{
+    tcg_gen_andc_vec(vece, d, m, k);
+    tcg_gen_xor_vec(vece, d, d, n);
+}
+
+static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen4 op = {
+        .fni8 = gen_bcax_i64,
+        .fniv = gen_bcax_vec,
+        .fno = gen_helper_sve2_bcax,
+        .vece = MO_64,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BCAX(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_zzzz_fn(s, a, gen_bcax);
+}
+
+static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                    uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    /* BSL differs from the generic bitsel in argument ordering. */
+    tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
+}
+
+static bool trans_BSL(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_zzzz_fn(s, a, gen_bsl);
+}
+
+static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+    tcg_gen_andc_i64(n, k, n);
+    tcg_gen_andc_i64(m, m, k);
+    tcg_gen_or_i64(d, n, m);
+}
+
+static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                          TCGv_vec m, TCGv_vec k)
+{
+    if (TCG_TARGET_HAS_bitsel_vec) {
+        tcg_gen_not_vec(vece, n, n);
+        tcg_gen_bitsel_vec(vece, d, k, n, m);
+    } else {
+        tcg_gen_andc_vec(vece, n, k, n);
+        tcg_gen_andc_vec(vece, m, m, k);
+        tcg_gen_or_vec(vece, d, n, m);
+    }
+}
+
+static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen4 op = {
+        .fni8 = gen_bsl1n_i64,
+        .fniv = gen_bsl1n_vec,
+        .fno = gen_helper_sve2_bsl1n,
+        .vece = MO_64,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BSL1N(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_zzzz_fn(s, a, gen_bsl1n);
+}
+
+static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+    /*
+     * Z[dn] = (n & k) | (~m & ~k)
+     *       = (n & k) | ~(m | k)
+     */
+    tcg_gen_and_i64(n, n, k);
+    if (TCG_TARGET_HAS_orc_i64) {
+        tcg_gen_or_i64(m, m, k);
+        tcg_gen_orc_i64(d, n, m);
+    } else {
+        tcg_gen_nor_i64(m, m, k);
+        tcg_gen_or_i64(d, n, m);
+    }
+}
+
+static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                          TCGv_vec m, TCGv_vec k)
+{
+    if (TCG_TARGET_HAS_bitsel_vec) {
+        tcg_gen_not_vec(vece, m, m);
+        tcg_gen_bitsel_vec(vece, d, k, n, m);
+    } else {
+        tcg_gen_and_vec(vece, n, n, k);
+        tcg_gen_or_vec(vece, m, m, k);
+        tcg_gen_orc_vec(vece, d, n, m);
+    }
+}
+
+static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen4 op = {
+        .fni8 = gen_bsl2n_i64,
+        .fniv = gen_bsl2n_vec,
+        .fno = gen_helper_sve2_bsl2n,
+        .vece = MO_64,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BSL2N(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_zzzz_fn(s, a, gen_bsl2n);
+}
+
+static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+    tcg_gen_and_i64(n, n, k);
+    tcg_gen_andc_i64(m, m, k);
+    tcg_gen_nor_i64(d, n, m);
+}
+
+static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                         TCGv_vec m, TCGv_vec k)
+{
+    tcg_gen_bitsel_vec(vece, d, k, n, m);
+    tcg_gen_not_vec(vece, d, d);
+}
+
+static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen4 op = {
+        .fni8 = gen_nbsl_i64,
+        .fniv = gen_nbsl_vec,
+        .fno = gen_helper_sve2_nbsl,
+        .vece = MO_64,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_NBSL(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_sve2_zzzz_fn(s, a, gen_nbsl);
+}
+
/*
*** SVE Integer Arithmetic - Unpredicated Group
*/
--
2.25.1