qemu-devel.nongnu.org archive mirror
From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH v6 17/26] target/arm: Use vector infrastructure for aa64 constant shifts
Date: Tue, 21 Nov 2017 22:25:25 +0100
Message-ID: <20171121212534.5177-18-richard.henderson@linaro.org>
In-Reply-To: <20171121212534.5177-1-richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
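Notes (illustrative only, not part of the original patch):

This patch routes the AArch64 immediate shifts SSHR/USHR, SSRA/USRA, SRI
and SHL/SLI through the generic vector expanders (tcg_gen_gvec_shri,
tcg_gen_gvec_sari, tcg_gen_gvec_shli and tcg_gen_gvec_2i with
per-element-size GVecGen2i tables), keeps the rounding variants
SRSHR/URSHR/SRSRA/URSRA on the element-by-element path, and makes the
gvec/vec shift helpers treat a shift count of zero as a plain move.

The SRI helpers below build a 64-bit lane mask by replicating a per-lane
mask with a multiply: -1ull / 0xff is 0x0101010101010101, so multiplying
the 8-bit mask by it copies the mask into every byte.  A minimal
standalone sketch of that arithmetic (plain C, not QEMU code; main() is
only for illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
        for (unsigned shift = 1; shift < 8; shift++) {
            /* Same expression as gen_shr8_ins_i64() in the patch.  */
            uint64_t mask = (0xff >> shift) * (-1ull / 0xff);

            /* Reference: build the mask one byte lane at a time.  */
            uint64_t ref = 0;
            for (int lane = 0; lane < 8; lane++) {
                ref |= (uint64_t)(0xffu >> shift) << (8 * lane);
            }
            printf("shift %u: %016" PRIx64 " %s\n", shift, mask,
                   mask == ref ? "ok" : "MISMATCH");
        }
        return 0;
    }
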
 target/arm/translate-a64.c | 386 ++++++++++++++++++++++++++++++++++++++-------
 tcg/tcg-op-gvec.c          |  18 ++-
 tcg/tcg-op-vec.c           |   9 +-
 3 files changed, 351 insertions(+), 62 deletions(-)
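
The SSRA/USRA helpers (gen_ssra8_i64() and friends) rely on
tcg_gen_vec_sar8i_i64()/tcg_gen_vec_add8_i64() performing independent
per-byte arithmetic inside one 64-bit value.  A plain-C reference model
of that lane-wise shift-and-accumulate (ssra8_ref() is a name I made up
for illustration; it assumes arithmetic right shift of signed values,
which is what TCG's sari provides):

    #include <stdint.h>
    #include <assert.h>

    static uint64_t ssra8_ref(uint64_t d, uint64_t a, unsigned shift)
    {
        uint64_t res = 0;
        for (int lane = 0; lane < 8; lane++) {
            int8_t ai = (int8_t)(a >> (8 * lane));   /* source lane */
            int8_t di = (int8_t)(d >> (8 * lane));   /* accumulator lane */
            uint8_t ri = (uint8_t)(di + (ai >> shift));
            res |= (uint64_t)ri << (8 * lane);
        }
        return res;
    }

    int main(void)
    {
        /* 0x80 >> 1 (arithmetic) is 0xc0 per lane; accumulated onto 0x01.  */
        assert(ssra8_ref(0x0101010101010101ull, 0x8080808080808080ull, 1)
               == 0xc1c1c1c1c1c1c1c1ull);
        return 0;
    }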

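The SLI path keeps the low 'shift' bits of the destination and inserts
the shifted source above them, via tcg_gen_deposit_i64/_i32 for the
32/64-bit lanes or an explicit mask for the 8/16-bit lanes.  Modelled in
plain C (sli64() is a hypothetical helper name, illustrative only):

    #include <stdint.h>
    #include <assert.h>

    static uint64_t sli64(uint64_t d, uint64_t a, unsigned shift)
    {
        /* deposit(d, a, shift, 64 - shift): bits below 'shift' come
         * from d, the rest from a << shift.  */
        uint64_t keep = shift ? (UINT64_C(1) << shift) - 1 : 0;
        return (d & keep) | (a << shift);
    }

    int main(void)
    {
        assert(sli64(0xffffffffffffffffull, 0, 8) == 0xff);
        assert(sli64(0, 0x1234, 4) == 0x12340);
        return 0;
    }
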
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 8769b4505a..c47faa5633 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -6432,17 +6432,6 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
     }
 }
 
-/* Common SHL/SLI - Shift left with an optional insert */
-static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
-                                 bool insert, int shift)
-{
-    if (insert) { /* SLI */
-        tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
-    } else { /* SHL */
-        tcg_gen_shli_i64(tcg_res, tcg_src, shift);
-    }
-}
-
 /* SRI: shift right with insert */
 static void handle_shri_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                  int size, int shift)
@@ -6546,7 +6535,11 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert,
     tcg_rn = read_fp_dreg(s, rn);
     tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
 
-    handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
+    if (insert) {
+        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
+    } else {
+        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
+    }
 
     write_fp_dreg(s, rd, tcg_rd);
 
@@ -8283,16 +8276,195 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
     }
 }
 
+static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_sar8i_i64(a, a, shift);
+    tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_sar16i_i64(a, a, shift);
+    tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_sari_i32(a, a, shift);
+    tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_sari_i64(a, a, shift);
+    tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    tcg_gen_sari_vec(vece, a, a, sh);
+    tcg_gen_add_vec(vece, d, d, a);
+}
+
+static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_shr8i_i64(a, a, shift);
+    tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_vec_shr16i_i64(a, a, shift);
+    tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_shri_i32(a, a, shift);
+    tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_shri_i64(a, a, shift);
+    tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    tcg_gen_shri_vec(vece, a, a, sh);
+    tcg_gen_add_vec(vece, d, d, a);
+}
+
+static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = (0xff >> shift) * (-1ull / 0xff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = (0xffff >> shift) * (-1ull / 0xffff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_shri_i32(a, a, shift);
+    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
+}
+
+static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_shri_i64(a, a, shift);
+    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
+}
+
+static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
+    tcg_gen_shri_vec(vece, t, a, sh);
+    tcg_gen_and_vec(vece, d, d, m);
+    tcg_gen_or_vec(vece, d, d, t);
+
+    tcg_temp_free_vec(t);
+    tcg_temp_free_vec(m);
+}
+
 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
 {
+    static const GVecGen2i ssra_op[4] = {
+        { .fni8 = gen_ssra8_i64,
+          .fniv = gen_ssra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_8 },
+        { .fni8 = gen_ssra16_i64,
+          .fniv = gen_ssra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_16 },
+        { .fni4 = gen_ssra32_i32,
+          .fniv = gen_ssra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_32 },
+        { .fni8 = gen_ssra64_i64,
+          .fniv = gen_ssra_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opc = INDEX_op_sari_vec,
+          .vece = MO_64 },
+    };
+    static const GVecGen2i usra_op[4] = {
+        { .fni8 = gen_usra8_i64,
+          .fniv = gen_usra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_8, },
+        { .fni8 = gen_usra16_i64,
+          .fniv = gen_usra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_16, },
+        { .fni4 = gen_usra32_i32,
+          .fniv = gen_usra_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_32, },
+        { .fni8 = gen_usra64_i64,
+          .fniv = gen_usra_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_64, },
+    };
+    static const GVecGen2i sri_op[4] = {
+        { .fni8 = gen_shr8_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_8 },
+        { .fni8 = gen_shr16_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_16 },
+        { .fni4 = gen_shr32_ins_i32,
+          .fniv = gen_shr_ins_vec,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_32 },
+        { .fni8 = gen_shr64_ins_i64,
+          .fniv = gen_shr_ins_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .load_dest = true,
+          .opc = INDEX_op_shri_vec,
+          .vece = MO_64 },
+    };
+
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = 2 * (8 << size) - immhb;
     bool accumulate = false;
-    bool round = false;
-    bool insert = false;
     int dsize = is_q ? 128 : 64;
     int esize = 8 << size;
     int elements = dsize/esize;
@@ -8300,6 +8472,8 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
     TCGv_i64 tcg_rn = new_tmp_a64(s);
     TCGv_i64 tcg_rd = new_tmp_a64(s);
     TCGv_i64 tcg_round;
+    uint64_t round_const;
+    const GVecGen2i *gvec_op;
     int i;
 
     if (extract32(immh, 3, 1) && !is_q) {
@@ -8318,64 +8492,141 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
 
     switch (opcode) {
     case 0x02: /* SSRA / USRA (accumulate) */
-        accumulate = true;
-        break;
+        if (is_u) {
+            /* Shift count same as element size produces zero to add.  */
+            if (shift == 8 << size) {
+                goto done;
+            }
+            gvec_op = &usra_op[size];
+        } else {
+            /* Shift count same as element size produces all sign to add.  */
+            if (shift == 8 << size) {
+                shift -= 1;
+            }
+            gvec_op = &ssra_op[size];
+        }
+        goto do_gvec;
+    case 0x08: /* SRI */
+        /* Shift count same as element size is valid but does nothing.  */
+        if (shift == 8 << size) {
+            goto done;
+        }
+        gvec_op = &sri_op[size];
+    do_gvec:
+        tcg_gen_gvec_2i(vec_full_reg_offset(s, rd),
+                        vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                        vec_full_reg_size(s), shift, gvec_op);
+        return;
+
+    case 0x00: /* SSHR / USHR */
+        if (is_u) {
+            if (shift == 8 << size) {
+                /* Shift count the same size as element size produces zero.  */
+                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
+                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
+            } else {
+                tcg_gen_gvec_shri(size, vec_full_reg_offset(s, rd),
+                                  vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                                  vec_full_reg_size(s), shift);
+            }
+        } else {
+            /* Shift count the same size as element size produces all sign.  */
+            if (shift == 8 << size) {
+                shift -= 1;
+            }
+            tcg_gen_gvec_sari(size, vec_full_reg_offset(s, rd),
+                              vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                              vec_full_reg_size(s), shift);
+        }
+        return;
+
     case 0x04: /* SRSHR / URSHR (rounding) */
-        round = true;
         break;
     case 0x06: /* SRSRA / URSRA (accum + rounding) */
-        accumulate = round = true;
-        break;
-    case 0x08: /* SRI */
-        insert = true;
+        accumulate = true;
         break;
+    default:
+        g_assert_not_reached();
     }
 
-    if (round) {
-        uint64_t round_const = 1ULL << (shift - 1);
-        tcg_round = tcg_const_i64(round_const);
-    } else {
-        tcg_round = NULL;
-    }
+    round_const = 1ULL << (shift - 1);
+    tcg_round = tcg_const_i64(round_const);
 
     for (i = 0; i < elements; i++) {
         read_vec_element(s, tcg_rn, rn, i, memop);
-        if (accumulate || insert) {
+        if (accumulate) {
             read_vec_element(s, tcg_rd, rd, i, memop);
         }
 
-        if (insert) {
-            handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
-        } else {
-            handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
-                                    accumulate, is_u, size, shift);
-        }
+        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+                                accumulate, is_u, size, shift);
 
         write_vec_element(s, tcg_rd, rd, i, size);
     }
+    tcg_temp_free_i64(tcg_round);
 
+ done:
     if (!is_q) {
         clear_vec_high(s, rd);
     }
+}
 
-    if (round) {
-        tcg_temp_free_i64(tcg_round);
-    }
+static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = ((0xff << shift) & 0xff) * (-1ull / 0xff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    uint64_t mask = ((0xffff << shift) & 0xffff) * (-1ull / 0xffff);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, unsigned shift)
+{
+    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
+}
+
+static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, unsigned shift)
+{
+    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
+}
+
+static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, unsigned sh)
+{
+    uint64_t mask = (1ull << sh) - 1;
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_dupi_vec(vece, m, mask);
+    tcg_gen_shli_vec(vece, t, a, sh);
+    tcg_gen_and_vec(vece, d, d, m);
+    tcg_gen_or_vec(vece, d, d, t);
+
+    tcg_temp_free_vec(t);
+    tcg_temp_free_vec(m);
 }
 
 /* SHL/SLI - Vector shift left */
 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
-                                int immh, int immb, int opcode, int rn, int rd)
+                                 int immh, int immb, int opcode, int rn, int rd)
 {
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = immhb - (8 << size);
-    int dsize = is_q ? 128 : 64;
-    int esize = 8 << size;
-    int elements = dsize/esize;
-    TCGv_i64 tcg_rn = new_tmp_a64(s);
-    TCGv_i64 tcg_rd = new_tmp_a64(s);
-    int i;
 
     if (extract32(immh, 3, 1) && !is_q) {
         unallocated_encoding(s);
@@ -8391,19 +8642,40 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
         return;
     }
 
-    for (i = 0; i < elements; i++) {
-        read_vec_element(s, tcg_rn, rn, i, size);
-        if (insert) {
-            read_vec_element(s, tcg_rd, rd, i, size);
-        }
-
-        handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
-
-        write_vec_element(s, tcg_rd, rd, i, size);
-    }
-
-    if (!is_q) {
-        clear_vec_high(s, rd);
+    if (insert) {
+        static const GVecGen2i shi_op[4] = {
+            { .fni8 = gen_shl8_ins_i64,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_8 },
+            { .fni8 = gen_shl16_ins_i64,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_16 },
+            { .fni4 = gen_shl32_ins_i32,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_32 },
+            { .fni8 = gen_shl64_ins_i64,
+              .fniv = gen_shl_ins_vec,
+              .opc = INDEX_op_shli_vec,
+              .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+              .load_dest = true,
+              .vece = MO_64 },
+        };
+        tcg_gen_gvec_2i(vec_full_reg_offset(s, rd),
+                        vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                        vec_full_reg_size(s), shift, &shi_op[size]);
+    } else {
+        tcg_gen_gvec_shli(size, vec_full_reg_offset(s, rd),
+                          vec_full_reg_offset(s, rn), is_q ? 16 : 8,
+                          vec_full_reg_size(s), shift);
     }
 }
 
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index f8ccb137eb..d91b424dfc 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -1208,7 +1208,11 @@ void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    if (shift == 0) {
+        tcg_gen_gvec_mov(vece, dofs, aofs, opsz, clsz);
+    } else {
+        tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    }
 }
 
 void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, unsigned c)
@@ -1253,7 +1257,11 @@ void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    if (shift == 0) {
+        tcg_gen_gvec_mov(vece, dofs, aofs, opsz, clsz);
+    } else {
+        tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    }
 }
 
 void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, unsigned c)
@@ -1312,7 +1320,11 @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
     };
 
     tcg_debug_assert(vece <= MO_64);
-    tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    if (shift == 0) {
+        tcg_gen_gvec_mov(vece, dofs, aofs, opsz, clsz);
+    } else {
+        tcg_gen_gvec_2i(dofs, aofs, opsz, clsz, shift, &g[vece]);
+    }
 }
 
 static void do_zip(unsigned vece, uint32_t dofs, uint32_t aofs,
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index a441193b8e..502c5ba891 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -389,11 +389,16 @@ static void do_shifti(TCGOpcode opc, unsigned vece,
     TCGArg ri = temp_arg(rt);
     TCGArg ai = temp_arg(at);
     TCGType type = rt->base_type;
-    unsigned vecl = type - TCG_TYPE_V64;
     int can;
 
     tcg_debug_assert(at->base_type == type);
     tcg_debug_assert(i < (8 << vece));
+
+    if (i == 0) {
+        tcg_gen_mov_vec(r, a);
+        return;
+    }
+
     can = tcg_can_emit_vec_op(opc, type, vece);
     if (can > 0) {
         vec_gen_3(opc, type, vece, ri, ai, i);
@@ -402,7 +407,7 @@ static void do_shifti(TCGOpcode opc, unsigned vece,
            to the target.  Often, but not always, dupi can feed a vector
            shift easier than a scalar.  */
         tcg_debug_assert(can < 0);
-        tcg_expand_vec_op(opc, vecl, vece, ri, ai, i);
+        tcg_expand_vec_op(opc, type, vece, ri, ai, i);
     }
 }
 
-- 
2.13.6


Thread overview: 36+ messages
2017-11-21 21:25 [Qemu-devel] [PATCH v6 00/26] tcg: generic vector operations Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 01/26] tcg: Remove TCGV_UNUSED* and TCGV_IS_UNUSED* Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 02/26] tcg: Dynamically allocate TCGOps Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 03/26] tcg: Generalize TCGOp parameters Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 04/26] tcg: Add types and basic operations for host vectors Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 05/26] tcg: Add generic vector expanders Richard Henderson
2017-12-06 10:21   ` Kirill Batuzov
2017-12-08 21:35     ` Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 06/26] tcg: Allow multiple word entries into the constant pool Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 07/26] tcg: Add tcg_signed_cond Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 08/26] target/arm: Align vector registers Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 09/26] target/arm: Use vector infrastructure for aa64 add/sub/logic Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 10/26] target/arm: Use vector infrastructure for aa64 mov/not/neg Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 11/26] target/arm: Use vector infrastructure for aa64 dup/movi Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 12/26] tcg/i386: Add vector operations Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 13/26] tcg: Add tcg_expand_vec_op and tcg-target.opc.h Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 14/26] tcg: Add generic vector ops for interleave Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 15/26] target/arm: Use vector infrastructure for aa64 zip/uzp/trn/xtn Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 16/26] tcg: Add generic vector ops for constant shifts Richard Henderson
2017-11-21 21:25 ` Richard Henderson [this message]
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 18/26] tcg: Add generic vector ops for comparisons Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 19/26] target/arm: Use vector infrastructure for aa64 compares Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 20/26] tcg/i386: Add vector operations/expansions for shift/cmp/interleave Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 21/26] tcg: Add generic vector ops for multiplication Richard Henderson
2017-12-05 11:33   ` Kirill Batuzov
2017-12-08 21:36     ` Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 22/26] target/arm: Use vector infrastructure for aa64 multiplies Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 23/26] tcg: Add generic vector ops for extension Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 24/26] target/arm: Use vector infrastructure for aa64 widening shifts Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 25/26] tcg/i386: Add vector operations/expansions for mul/extend Richard Henderson
2017-11-21 21:25 ` [Qemu-devel] [PATCH v6 26/26] tcg/aarch64: Add vector operations Richard Henderson
2017-11-21 22:10 ` [Qemu-devel] [PATCH v6 00/26] tcg: generic " no-reply
2017-11-21 22:19 ` no-reply
2017-11-21 22:23 ` no-reply
2017-11-21 22:44 ` no-reply
2017-11-27 16:09 ` Timothy Pearson
