From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH v6 09/82] target/arm: Implement SVE2 saturating add/subtract (predicated)
Date: Fri, 30 Apr 2021 13:24:57 -0700	[thread overview]
Message-ID: <20210430202610.1136687-10-richard.henderson@linaro.org> (raw)
In-Reply-To: <20210430202610.1136687-1-richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
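Notes for reviewers, not part of the patch: the B/H/S helpers all widen
both operands to int64_t and clamp the intermediate result, while the
D-sized helpers need explicit overflow tests because the intermediate no
longer fits in int64_t.  Below is a minimal standalone sketch of those
two strategies; the names clamp_s64 and sat_add_s64 are hypothetical and
merely mirror do_sat_bhs and do_sqadd_d from the patch.

#include <stdint.h>
#include <stdio.h>

/* Clamp a widened intermediate into [min, max]; mirrors do_sat_bhs. */
static int64_t clamp_s64(int64_t val, int64_t min, int64_t max)
{
    return val >= max ? max : val <= min ? min : val;
}

/* 64-bit signed saturating add; mirrors the do_sqadd_d overflow test. */
static int64_t sat_add_s64(int64_t n, int64_t m)
{
    /* Wrapping add via unsigned arithmetic, keeping the sketch free of
       signed-overflow undefined behaviour.  */
    int64_t r = (int64_t)((uint64_t)n + (uint64_t)m);

    /* Overflow iff n and m have the same sign (sign bit of n ^ m clear)
       and r has the opposite sign (sign bit of r ^ n set).  */
    if (((r ^ n) & ~(n ^ m)) < 0) {
        return r < 0 ? INT64_MAX : INT64_MIN;
    }
    return r;
}

int main(void)
{
    /* Byte-sized SQADD: INT8_MAX + 1 clamps to INT8_MAX (127). */
    printf("%lld\n",
           (long long)clamp_s64((int64_t)INT8_MAX + 1, INT8_MIN, INT8_MAX));
    /* 64-bit SQADD: INT64_MAX + 1 saturates to INT64_MAX. */
    printf("%lld\n", (long long)sat_add_s64(INT64_MAX, 1));
    return 0;
}

Compiled with a plain "gcc -O2 sketch.c", this prints 127 and
9223372036854775807, i.e. both additions saturate at the type maximum
rather than wrapping.
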
 target/arm/helper-sve.h    |  54 +++++++++++
 target/arm/sve.decode      |  11 +++
 target/arm/sve_helper.c    | 194 ++++++++++++++++++++++++++-----------
 target/arm/translate-sve.c |   7 ++
 4 files changed, 210 insertions(+), 56 deletions(-)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 09bc067dd4..37461c9927 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -371,6 +371,60 @@ DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_s, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 61a3321325..cd4f73265f 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -1148,3 +1148,14 @@ SMAXP           01000100 .. 010 100 101 ... ..... .....  @rdn_pg_rm
 UMAXP           01000100 .. 010 101 101 ... ..... .....  @rdn_pg_rm
 SMINP           01000100 .. 010 110 101 ... ..... .....  @rdn_pg_rm
 UMINP           01000100 .. 010 111 101 ... ..... .....  @rdn_pg_rm
+
+### SVE2 saturating add/subtract (predicated)
+
+SQADD_zpzz      01000100 .. 011 000 100 ... ..... .....  @rdn_pg_rm
+UQADD_zpzz      01000100 .. 011 001 100 ... ..... .....  @rdn_pg_rm
+SQSUB_zpzz      01000100 .. 011 010 100 ... ..... .....  @rdn_pg_rm
+UQSUB_zpzz      01000100 .. 011 011 100 ... ..... .....  @rdn_pg_rm
+SUQADD          01000100 .. 011 100 100 ... ..... .....  @rdn_pg_rm
+USQADD          01000100 .. 011 101 100 ... ..... .....  @rdn_pg_rm
+SQSUB_zpzz      01000100 .. 011 110 100 ... ..... .....  @rdm_pg_rn # SQSUBR
+UQSUB_zpzz      01000100 .. 011 111 100 ... ..... .....  @rdm_pg_rn # UQSUBR
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 7cc559d950..12a2078edb 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -678,6 +678,135 @@ DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS)
 DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS)
 DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D)
 
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max)
+{
+    return val >= max ? max : val <= min ? min : val;
+}
+
+#define DO_SQADD_B(n, m) do_sat_bhs((int64_t)n + m, INT8_MIN, INT8_MAX)
+#define DO_SQADD_H(n, m) do_sat_bhs((int64_t)n + m, INT16_MIN, INT16_MAX)
+#define DO_SQADD_S(n, m) do_sat_bhs((int64_t)n + m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_sqadd_d(int64_t n, int64_t m)
+{
+    int64_t r = n + m;
+    if (((r ^ n) & ~(n ^ m)) < 0) {
+        /* Signed overflow.  */
+        return r < 0 ? INT64_MAX : INT64_MIN;
+    }
+    return r;
+}
+
+DO_ZPZZ(sve2_sqadd_zpzz_b, int8_t, H1_2, DO_SQADD_B)
+DO_ZPZZ(sve2_sqadd_zpzz_h, int16_t, H1_2, DO_SQADD_H)
+DO_ZPZZ(sve2_sqadd_zpzz_s, int32_t, H1_4, DO_SQADD_S)
+DO_ZPZZ_D(sve2_sqadd_zpzz_d, int64_t, do_sqadd_d)
+
+#define DO_UQADD_B(n, m) do_sat_bhs((int64_t)n + m, 0, UINT8_MAX)
+#define DO_UQADD_H(n, m) do_sat_bhs((int64_t)n + m, 0, UINT16_MAX)
+#define DO_UQADD_S(n, m) do_sat_bhs((int64_t)n + m, 0, UINT32_MAX)
+
+static inline uint64_t do_uqadd_d(uint64_t n, uint64_t m)
+{
+    uint64_t r = n + m;
+    return r < n ? UINT64_MAX : r;
+}
+
+DO_ZPZZ(sve2_uqadd_zpzz_b, uint8_t, H1_2, DO_UQADD_B)
+DO_ZPZZ(sve2_uqadd_zpzz_h, uint16_t, H1_2, DO_UQADD_H)
+DO_ZPZZ(sve2_uqadd_zpzz_s, uint32_t, H1_4, DO_UQADD_S)
+DO_ZPZZ_D(sve2_uqadd_zpzz_d, uint64_t, do_uqadd_d)
+
+#define DO_SQSUB_B(n, m) do_sat_bhs((int64_t)n - m, INT8_MIN, INT8_MAX)
+#define DO_SQSUB_H(n, m) do_sat_bhs((int64_t)n - m, INT16_MIN, INT16_MAX)
+#define DO_SQSUB_S(n, m) do_sat_bhs((int64_t)n - m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_sqsub_d(int64_t n, int64_t m)
+{
+    int64_t r = n - m;
+    if (((r ^ n) & (n ^ m)) < 0) {
+        /* Signed overflow.  */
+        return r < 0 ? INT64_MAX : INT64_MIN;
+    }
+    return r;
+}
+
+DO_ZPZZ(sve2_sqsub_zpzz_b, int8_t, H1_2, DO_SQSUB_B)
+DO_ZPZZ(sve2_sqsub_zpzz_h, int16_t, H1_2, DO_SQSUB_H)
+DO_ZPZZ(sve2_sqsub_zpzz_s, int32_t, H1_4, DO_SQSUB_S)
+DO_ZPZZ_D(sve2_sqsub_zpzz_d, int64_t, do_sqsub_d)
+
+#define DO_UQSUB_B(n, m) do_sat_bhs((int64_t)n - m, 0, UINT8_MAX)
+#define DO_UQSUB_H(n, m) do_sat_bhs((int64_t)n - m, 0, UINT16_MAX)
+#define DO_UQSUB_S(n, m) do_sat_bhs((int64_t)n - m, 0, UINT32_MAX)
+
+static inline uint64_t do_uqsub_d(uint64_t n, uint64_t m)
+{
+    return n > m ? n - m : 0;
+}
+
+DO_ZPZZ(sve2_uqsub_zpzz_b, uint8_t, H1_2, DO_UQSUB_B)
+DO_ZPZZ(sve2_uqsub_zpzz_h, uint16_t, H1_2, DO_UQSUB_H)
+DO_ZPZZ(sve2_uqsub_zpzz_s, uint32_t, H1_4, DO_UQSUB_S)
+DO_ZPZZ_D(sve2_uqsub_zpzz_d, uint64_t, do_uqsub_d)
+
+#define DO_SUQADD_B(n, m) \
+    do_sat_bhs((int64_t)(int8_t)n + m, INT8_MIN, INT8_MAX)
+#define DO_SUQADD_H(n, m) \
+    do_sat_bhs((int64_t)(int16_t)n + m, INT16_MIN, INT16_MAX)
+#define DO_SUQADD_S(n, m) \
+    do_sat_bhs((int64_t)(int32_t)n + m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_suqadd_d(int64_t n, uint64_t m)
+{
+    uint64_t r = n + m;
+
+    if (n < 0) {
+        /* Note that m - abs(n) cannot underflow. */
+        if (r > INT64_MAX) {
+            /* Result is either very large positive or negative. */
+            if (m > -n) {
+                /* m > abs(n), so r is a very large positive. */
+                return INT64_MAX;
+            }
+            /* Result is negative. */
+        }
+    } else {
+        /* Both inputs are positive: check for overflow.  */
+        if (r < m || r > INT64_MAX) {
+            return INT64_MAX;
+        }
+    }
+    return r;
+}
+
+DO_ZPZZ(sve2_suqadd_zpzz_b, uint8_t, H1_2, DO_SUQADD_B)
+DO_ZPZZ(sve2_suqadd_zpzz_h, uint16_t, H1_2, DO_SUQADD_H)
+DO_ZPZZ(sve2_suqadd_zpzz_s, uint32_t, H1_4, DO_SUQADD_S)
+DO_ZPZZ_D(sve2_suqadd_zpzz_d, uint64_t, do_suqadd_d)
+
+#define DO_USQADD_B(n, m) \
+    do_sat_bhs((int64_t)n + (int8_t)m, 0, UINT8_MAX)
+#define DO_USQADD_H(n, m) \
+    do_sat_bhs((int64_t)n + (int16_t)m, 0, UINT16_MAX)
+#define DO_USQADD_S(n, m) \
+    do_sat_bhs((int64_t)n + (int32_t)m, 0, UINT32_MAX)
+
+static inline uint64_t do_usqadd_d(uint64_t n, int64_t m)
+{
+    uint64_t r = n + m;
+
+    if (m < 0) {
+        return n < -m ? 0 : r;
+    }
+    return r < n ? UINT64_MAX : r;
+}
+
+DO_ZPZZ(sve2_usqadd_zpzz_b, uint8_t, H1_2, DO_USQADD_B)
+DO_ZPZZ(sve2_usqadd_zpzz_h, uint16_t, H1_2, DO_USQADD_H)
+DO_ZPZZ(sve2_usqadd_zpzz_s, uint32_t, H1_4, DO_USQADD_S)
+DO_ZPZZ_D(sve2_usqadd_zpzz_d, uint64_t, do_usqadd_d)
+
 #undef DO_ZPZZ
 #undef DO_ZPZZ_D
 
@@ -1614,13 +1743,7 @@ void HELPER(sve_sqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(int8_t)) {
-        int r = *(int8_t *)(a + i) + b;
-        if (r > INT8_MAX) {
-            r = INT8_MAX;
-        } else if (r < INT8_MIN) {
-            r = INT8_MIN;
-        }
-        *(int8_t *)(d + i) = r;
+        *(int8_t *)(d + i) = DO_SQADD_B(b, *(int8_t *)(a + i));
     }
 }
 
@@ -1629,13 +1752,7 @@ void HELPER(sve_sqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(int16_t)) {
-        int r = *(int16_t *)(a + i) + b;
-        if (r > INT16_MAX) {
-            r = INT16_MAX;
-        } else if (r < INT16_MIN) {
-            r = INT16_MIN;
-        }
-        *(int16_t *)(d + i) = r;
+        *(int16_t *)(d + i) = DO_SQADD_H(b, *(int16_t *)(a + i));
     }
 }
 
@@ -1644,13 +1761,7 @@ void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(int32_t)) {
-        int64_t r = *(int32_t *)(a + i) + b;
-        if (r > INT32_MAX) {
-            r = INT32_MAX;
-        } else if (r < INT32_MIN) {
-            r = INT32_MIN;
-        }
-        *(int32_t *)(d + i) = r;
+        *(int32_t *)(d + i) = DO_SQADD_S(b, *(int32_t *)(a + i));
     }
 }
 
@@ -1659,13 +1770,7 @@ void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(int64_t)) {
-        int64_t ai = *(int64_t *)(a + i);
-        int64_t r = ai + b;
-        if (((r ^ ai) & ~(ai ^ b)) < 0) {
-            /* Signed overflow.  */
-            r = (r < 0 ? INT64_MAX : INT64_MIN);
-        }
-        *(int64_t *)(d + i) = r;
+        *(int64_t *)(d + i) = do_sqadd_d(b, *(int64_t *)(a + i));
     }
 }
 
@@ -1678,13 +1783,7 @@ void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
-        int r = *(uint8_t *)(a + i) + b;
-        if (r > UINT8_MAX) {
-            r = UINT8_MAX;
-        } else if (r < 0) {
-            r = 0;
-        }
-        *(uint8_t *)(d + i) = r;
+        *(uint8_t *)(d + i) = DO_UQADD_B(b, *(uint8_t *)(a + i));
     }
 }
 
@@ -1693,13 +1792,7 @@ void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
-        int r = *(uint16_t *)(a + i) + b;
-        if (r > UINT16_MAX) {
-            r = UINT16_MAX;
-        } else if (r < 0) {
-            r = 0;
-        }
-        *(uint16_t *)(d + i) = r;
+        *(uint16_t *)(d + i) = DO_UQADD_H(b, *(uint16_t *)(a + i));
     }
 }
 
@@ -1708,13 +1801,7 @@ void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
-        int64_t r = *(uint32_t *)(a + i) + b;
-        if (r > UINT32_MAX) {
-            r = UINT32_MAX;
-        } else if (r < 0) {
-            r = 0;
-        }
-        *(uint32_t *)(d + i) = r;
+        *(uint32_t *)(d + i) = DO_UQADD_S(b, *(uint32_t *)(a + i));
     }
 }
 
@@ -1723,11 +1810,7 @@ void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
-        uint64_t r = *(uint64_t *)(a + i) + b;
-        if (r < b) {
-            r = UINT64_MAX;
-        }
-        *(uint64_t *)(d + i) = r;
+        *(uint64_t *)(d + i) = do_uqadd_d(b, *(uint64_t *)(a + i));
     }
 }
 
@@ -1736,8 +1819,7 @@ void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc)
     intptr_t i, oprsz = simd_oprsz(desc);
 
     for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
-        uint64_t ai = *(uint64_t *)(a + i);
-        *(uint64_t *)(d + i) = (ai < b ? 0 : ai - b);
+        *(uint64_t *)(d + i) = do_uqsub_d(*(uint64_t *)(a + i), b);
     }
 }
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 2d449c9b57..609d5ae7b7 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -5963,3 +5963,10 @@ DO_SVE2_ZPZZ(SMAXP, smaxp)
 DO_SVE2_ZPZZ(UMAXP, umaxp)
 DO_SVE2_ZPZZ(SMINP, sminp)
 DO_SVE2_ZPZZ(UMINP, uminp)
+
+DO_SVE2_ZPZZ(SQADD_zpzz, sqadd)
+DO_SVE2_ZPZZ(UQADD_zpzz, uqadd)
+DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub)
+DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub)
+DO_SVE2_ZPZZ(SUQADD, suqadd)
+DO_SVE2_ZPZZ(USQADD, usqadd)
-- 
2.25.1




Thread overview: 184+ messages
2021-04-30 20:24 [PATCH v6 00/82] target/arm: Implement SVE2 Richard Henderson
2021-04-30 20:24 ` [PATCH v6 01/82] target/arm: Add ID_AA64ZFR0 fields and isar_feature_aa64_sve2 Richard Henderson
2021-05-11  7:55   ` Peter Maydell
2021-05-11 17:20     ` Richard Henderson
2021-04-30 20:24 ` [PATCH v6 02/82] target/arm: Implement SVE2 Integer Multiply - Unpredicated Richard Henderson
2021-05-11  8:00   ` Peter Maydell
2021-04-30 20:24 ` [PATCH v6 03/82] target/arm: Implement SVE2 integer pairwise add and accumulate long Richard Henderson
2021-05-11  8:02   ` Peter Maydell
2021-04-30 20:24 ` [PATCH v6 04/82] target/arm: Implement SVE2 integer unary operations (predicated) Richard Henderson
2021-05-11  8:10   ` Peter Maydell
2021-05-11 17:22     ` Richard Henderson
2021-04-30 20:24 ` [PATCH v6 05/82] target/arm: Split out saturating/rounding shifts from neon Richard Henderson
2021-05-11  8:36   ` Peter Maydell
2021-04-30 20:24 ` [PATCH v6 06/82] target/arm: Implement SVE2 saturating/rounding bitwise shift left (predicated) Richard Henderson
2021-05-11  8:43   ` Peter Maydell
2021-05-11 15:40     ` Richard Henderson
2021-05-11 15:56       ` Peter Maydell
2021-04-30 20:24 ` [PATCH v6 07/82] target/arm: Implement SVE2 integer halving add/subtract (predicated) Richard Henderson
2021-05-11  8:45   ` Peter Maydell
2021-04-30 20:24 ` [PATCH v6 08/82] target/arm: Implement SVE2 integer pairwise arithmetic Richard Henderson
2021-05-11  8:58   ` Peter Maydell
2021-04-30 20:24 ` Richard Henderson [this message]
2021-05-11  9:07   ` [PATCH v6 09/82] target/arm: Implement SVE2 saturating add/subtract (predicated) Peter Maydell
2021-04-30 20:24 ` [PATCH v6 10/82] target/arm: Implement SVE2 integer add/subtract long Richard Henderson
2021-05-11  9:11   ` Peter Maydell
2021-04-30 20:24 ` [PATCH v6 11/82] target/arm: Implement SVE2 integer add/subtract interleaved long Richard Henderson
2021-05-11  9:12   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 12/82] target/arm: Implement SVE2 integer add/subtract wide Richard Henderson
2021-05-11  9:14   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 13/82] target/arm: Implement SVE2 integer multiply long Richard Henderson
2021-05-11 12:21   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 14/82] target/arm: Implement PMULLB and PMULLT Richard Henderson
2021-05-11 12:29   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 15/82] target/arm: Implement SVE2 bitwise shift left long Richard Henderson
2021-05-11 12:40   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 16/82] target/arm: Implement SVE2 bitwise exclusive-or interleaved Richard Henderson
2021-05-11 12:43   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 17/82] target/arm: Implement SVE2 bitwise permute Richard Henderson
2021-05-11 12:58   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 18/82] target/arm: Implement SVE2 complex integer add Richard Henderson
2021-05-11 13:02   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 19/82] target/arm: Implement SVE2 integer absolute difference and accumulate long Richard Henderson
2021-05-11 15:27   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 20/82] target/arm: Implement SVE2 integer add/subtract long with carry Richard Henderson
2021-05-11 15:48   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 21/82] target/arm: Implement SVE2 bitwise shift right and accumulate Richard Henderson
2021-05-11 15:57   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 22/82] target/arm: Implement SVE2 bitwise shift and insert Richard Henderson
2021-05-11 15:58   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 23/82] target/arm: Implement SVE2 integer absolute difference and accumulate Richard Henderson
2021-05-11 15:59   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 24/82] target/arm: Implement SVE2 saturating extract narrow Richard Henderson
2021-05-11 16:08   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 25/82] target/arm: Implement SVE2 floating-point pairwise Richard Henderson
2021-05-11 16:09   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 26/82] target/arm: Implement SVE2 SHRN, RSHRN Richard Henderson
2021-05-12  8:52   ` Peter Maydell
2021-05-12 16:07     ` Richard Henderson
2021-04-30 20:25 ` [PATCH v6 27/82] target/arm: Implement SVE2 SQSHRUN, SQRSHRUN Richard Henderson
2021-05-12  8:54   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 28/82] target/arm: Implement SVE2 UQSHRN, UQRSHRN Richard Henderson
2021-05-12  8:56   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 29/82] target/arm: Implement SVE2 SQSHRN, SQRSHRN Richard Henderson
2021-05-12  8:59   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 30/82] target/arm: Implement SVE2 WHILEGT, WHILEGE, WHILEHI, WHILEHS Richard Henderson
2021-05-12  9:07   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 31/82] target/arm: Implement SVE2 WHILERW, WHILEWR Richard Henderson
2021-05-12 14:06   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 32/82] target/arm: Implement SVE2 bitwise ternary operations Richard Henderson
2021-05-12 14:12   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 33/82] target/arm: Implement SVE2 MATCH, NMATCH Richard Henderson
2021-04-30 20:25 ` [PATCH v6 34/82] target/arm: Implement SVE2 saturating multiply-add long Richard Henderson
2021-05-12 14:21   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 35/82] target/arm: Implement SVE2 saturating multiply-add high Richard Henderson
2021-05-12 15:12   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 36/82] target/arm: Implement SVE2 integer multiply-add long Richard Henderson
2021-05-12 15:13   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 37/82] target/arm: Implement SVE2 complex integer multiply-add Richard Henderson
2021-05-12 15:20   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 38/82] target/arm: Implement SVE2 ADDHNB, ADDHNT Richard Henderson
2021-05-12 15:23   ` Peter Maydell
2021-05-12 16:17     ` Richard Henderson
2021-04-30 20:25 ` [PATCH v6 39/82] target/arm: Implement SVE2 RADDHNB, RADDHNT Richard Henderson
2021-05-12 15:24   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 40/82] target/arm: Implement SVE2 SUBHNB, SUBHNT Richard Henderson
2021-05-12 15:24   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 41/82] target/arm: Implement SVE2 RSUBHNB, RSUBHNT Richard Henderson
2021-05-12 15:25   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 42/82] target/arm: Implement SVE2 HISTCNT, HISTSEG Richard Henderson
2021-05-13 10:22   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 43/82] target/arm: Implement SVE2 XAR Richard Henderson
2021-05-13 10:27   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 44/82] target/arm: Implement SVE2 scatter store insns Richard Henderson
2021-05-13 10:31   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 45/82] target/arm: Implement SVE2 gather load insns Richard Henderson
2021-05-13 10:33   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 46/82] target/arm: Implement SVE2 FMMLA Richard Henderson
2021-05-13 10:38   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 47/82] target/arm: Implement SVE2 SPLICE, EXT Richard Henderson
2021-05-13 10:41   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 48/82] target/arm: Pass separate addend to {U, S}DOT helpers Richard Henderson
2021-05-13 10:47   ` Peter Maydell
2021-05-14 16:33     ` Richard Henderson
2021-05-14 16:35       ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 49/82] target/arm: Pass separate addend to FCMLA helpers Richard Henderson
2021-04-30 20:25 ` [PATCH v6 50/82] target/arm: Split out formats for 2 vectors + 1 index Richard Henderson
2021-05-13 10:49   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 51/82] target/arm: Split out formats for 3 " Richard Henderson
2021-05-13 10:53   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 52/82] target/arm: Implement SVE2 integer multiply (indexed) Richard Henderson
2021-05-13 12:31   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 53/82] target/arm: Implement SVE2 integer multiply-add (indexed) Richard Henderson
2021-05-13 12:33   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 54/82] target/arm: Implement SVE2 saturating multiply-add high (indexed) Richard Henderson
2021-05-13 12:35   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 55/82] target/arm: Implement SVE2 saturating multiply-add (indexed) Richard Henderson
2021-05-13 12:42   ` Peter Maydell
2021-05-14 18:17     ` Richard Henderson
2021-04-30 20:25 ` [PATCH v6 56/82] target/arm: Implement SVE2 saturating multiply (indexed) Richard Henderson
2021-05-13 12:45   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 57/82] target/arm: Implement SVE2 signed saturating doubling multiply high Richard Henderson
2021-05-13 12:48   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 58/82] target/arm: Implement SVE2 saturating multiply high (indexed) Richard Henderson
2021-05-13 12:51   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 59/82] target/arm: Implement SVE mixed sign dot product (indexed) Richard Henderson
2021-05-13 12:57   ` Peter Maydell
2021-05-14 18:47     ` Richard Henderson
2021-04-30 20:25 ` [PATCH v6 60/82] target/arm: Implement SVE mixed sign dot product Richard Henderson
2021-05-13 13:01   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 61/82] target/arm: Implement SVE2 crypto unary operations Richard Henderson
2021-05-13 13:02   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 62/82] target/arm: Implement SVE2 crypto destructive binary operations Richard Henderson
2021-05-13 13:04   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 63/82] target/arm: Implement SVE2 crypto constructive " Richard Henderson
2021-05-13 13:52   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 64/82] target/arm: Implement SVE2 TBL, TBX Richard Henderson
2021-05-13 13:59   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 65/82] target/arm: Implement SVE2 FCVTNT Richard Henderson
2021-05-13 14:01   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 66/82] target/arm: Implement SVE2 FCVTLT Richard Henderson
2021-05-13 14:03   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 67/82] target/arm: Implement SVE2 FCVTXNT, FCVTX Richard Henderson
2021-05-13 14:06   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 68/82] target/arm: Implement SVE2 FLOGB Richard Henderson
2021-05-13 14:18   ` Peter Maydell
2021-05-15 16:14     ` Richard Henderson
2021-04-30 20:25 ` [PATCH v6 69/82] target/arm: Share table of sve load functions Richard Henderson
2021-05-13 14:25   ` Peter Maydell
2021-05-15 16:25     ` Richard Henderson
2021-04-30 20:25 ` [PATCH v6 70/82] target/arm: Implement SVE2 LD1RO Richard Henderson
2021-05-13 16:41   ` Peter Maydell
2021-04-30 20:25 ` [PATCH v6 71/82] target/arm: Implement 128-bit ZIP, UZP, TRN Richard Henderson
2021-05-13 16:48   ` Peter Maydell
2021-04-30 20:26 ` [PATCH v6 72/82] target/arm: Implement SVE2 bitwise shift immediate Richard Henderson
2021-05-13 16:57   ` Peter Maydell
2021-05-15 16:53     ` Richard Henderson
2021-04-30 20:26 ` [PATCH v6 73/82] target/arm: Implement SVE2 fp multiply-add long Richard Henderson
2021-05-13 17:04   ` Peter Maydell
2021-05-15 17:09     ` Richard Henderson
2021-04-30 20:26 ` [PATCH v6 74/82] target/arm: Implement aarch64 SUDOT, USDOT Richard Henderson
2021-05-13 17:09   ` Peter Maydell
2021-04-30 20:26 ` [PATCH v6 75/82] target/arm: Split out do_neon_ddda_fpst Richard Henderson
2021-05-13 17:13   ` Peter Maydell
2021-04-30 20:26 ` [PATCH v6 76/82] target/arm: Remove unused fpst from VDOT_scalar Richard Henderson
2021-05-13 17:18   ` Peter Maydell
2021-04-30 20:26 ` [PATCH v6 77/82] target/arm: Fix decode for VDOT (indexed) Richard Henderson
2021-05-13 19:25   ` Peter Maydell
2021-05-15 17:13     ` Richard Henderson
2021-05-16 16:09       ` Peter Maydell
2021-05-17 15:48         ` Richard Henderson
2021-05-15 17:20     ` Richard Henderson
2021-04-30 20:26 ` [PATCH v6 78/82] target/arm: Split decode of VSDOT and VUDOT Richard Henderson
2021-05-13 19:27   ` Peter Maydell
2021-04-30 20:26 ` [PATCH v6 79/82] target/arm: Implement aarch32 VSUDOT, VUSDOT Richard Henderson
2021-05-13 19:32   ` Peter Maydell
2021-04-30 20:26 ` [PATCH v6 80/82] target/arm: Implement integer matrix multiply accumulate Richard Henderson
2021-05-13 19:49   ` Peter Maydell
2021-05-14 16:58     ` Richard Henderson
2021-04-30 20:26 ` [PATCH v6 81/82] linux-user/aarch64: Enable hwcap bits for sve2 and related extensions Richard Henderson
2021-05-13 19:33   ` Peter Maydell
2021-04-30 20:26 ` [PATCH v6 82/82] target/arm: Enable SVE2 " Richard Henderson
2021-05-13 19:35   ` Peter Maydell
2021-05-14 17:21     ` Richard Henderson
2021-05-13 19:49 ` [PATCH v6 00/82] target/arm: Implement SVE2 Peter Maydell
