From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-devel@nongnu.org
Cc: patches@linaro.org, "Michael Matz" <matz@suse.de>,
	"Alexander Graf" <agraf@suse.de>,
	"Claudio Fontana" <claudio.fontana@linaro.org>,
	"Dirk Mueller" <dmueller@suse.de>,
	"Will Newton" <will.newton@linaro.org>,
	"Laurent Desnogues" <laurent.desnogues@gmail.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	kvmarm@lists.cs.columbia.edu,
	"Christoffer Dall" <christoffer.dall@linaro.org>,
	"Richard Henderson" <rth@twiddle.net>
Subject: [Qemu-devel] [PATCH 07/10] target-arm: A64: Add SIMD across-lanes instructions
Date: Fri, 10 Jan 2014 17:12:49 +0000
Message-ID: <1389373972-27686-8-git-send-email-peter.maydell@linaro.org>
In-Reply-To: <1389373972-27686-1-git-send-email-peter.maydell@linaro.org>

From: Michael Matz <matz@suse.de>

Add support for the SIMD "across lanes" instruction group (C3.6.4).
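
As a purely illustrative aside (not part of the patch), a C reference
model of what the integer reductions in this group compute might look
like the sketch below; the function name and flat lane array are
invented for illustration:

    #include <stdint.h>

    /* Illustrative sketch only: accumulate every lane of a vector
     * into one scalar, as ADDV/UADDLV do. UADDLV widens the result
     * to 2*esize, modelled here with a 64-bit accumulator.
     */
    static uint64_t addv_model(const uint32_t *lane, int elements)
    {
        uint64_t res = lane[0];
        int i;

        for (i = 1; i < elements; i++) {
            res += lane[i];
        }
        return res;
    }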

Signed-off-by: Michael Matz <matz@suse.de>
[PMM: Updated to current codebase, added fp min/max ops,
 added unallocated encoding checks]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target-arm/translate-a64.c | 177 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 176 insertions(+), 1 deletion(-)

diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index ec39dd3..e9aeaa0 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -4859,6 +4859,29 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
     tcg_temp_free_i64(tcg_resh);
 }
 
+static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
+                        int opc, bool is_min, TCGv_ptr fpst)
+{
+    /* Helper function for disas_simd_across_lanes: do a single-precision
+     * min/max operation on the two inputs, returning the result
+     * in tcg_elt1.
+     */
+    if (opc == 0xc) {
+        if (is_min) {
+            gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+        } else {
+            gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+        }
+    } else {
+        assert(opc == 0xf);
+        if (is_min) {
+            gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+        } else {
+            gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+        }
+    }
+}
+
 /* C3.6.4 AdvSIMD across lanes
  *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
@@ -4867,7 +4890,159 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
  */
 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
 {
-    unsupported_encoding(s, insn);
+    int rd = extract32(insn, 0, 5);
+    int rn = extract32(insn, 5, 5);
+    int size = extract32(insn, 22, 2);
+    int opcode = extract32(insn, 12, 5);
+    bool is_q = extract32(insn, 30, 1);
+    bool is_u = extract32(insn, 29, 1);
+    bool is_fp = false;
+    bool is_min = false;
+    int esize;
+    int elements;
+    int i;
+    TCGv_i64 tcg_res, tcg_elt;
+
+    switch (opcode) {
+    case 0x1b: /* ADDV */
+        if (is_u) {
+            unallocated_encoding(s);
+            return;
+        }
+        /* fall through */
+    case 0x3: /* SADDLV, UADDLV */
+    case 0xa: /* SMAXV, UMAXV */
+    case 0x1a: /* SMINV, UMINV */
+        if (size == 3 || (size == 2 && !is_q)) {
+            unallocated_encoding(s);
+            return;
+        }
+        break;
+    case 0xc: /* FMAXNMV, FMINNMV */
+    case 0xf: /* FMAXV, FMINV */
+        if (!is_u || !is_q || extract32(size, 0, 1)) {
+            unallocated_encoding(s);
+            return;
+        }
+        /* Bit 1 of the size field encodes min vs max, and the actual size
+         * is always 32 bits: adjust the size variable so the following
+         * code can rely on it.
+         */
+        is_min = extract32(size, 1, 1);
+        is_fp = true;
+        size = 2;
+        break;
+    default:
+        unallocated_encoding(s);
+        return;
+    }
+
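+    /* For example, size 2 (32-bit elements) with Q set gives esize 32
+     * and elements 4: a 128-bit vector holds four 32-bit lanes.
+     */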
+    esize = 8 << size;
+    elements = (is_q ? 128 : 64) / esize;
+
+    tcg_res = tcg_temp_new_i64();
+    tcg_elt = tcg_temp_new_i64();
+
+    /* These instructions operate across all lanes of a vector
+     * to produce a single result. We can guarantee that a 64
+     * bit intermediate is sufficient:
+     *  + for [US]ADDLV the maximum element size is 32 bits, and
+     *    the result type is 64 bits
+     *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
+     *    same as the element size, which is 32 bits at most
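+     * As a concrete bound, UADDLV across four 32-bit lanes sums to
+     * at most 4 * (2^32 - 1), which needs only 34 bits.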
+     * For the integer operations we can choose to work at 64
+     * or 32 bits and truncate at the end; for simplicity
+     * we use 64 bits always. The floating point
+     * ops do require 32 bit intermediates, though.
+     */
+    if (!is_fp) {
+        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
+
+        for (i = 1; i < elements; i++) {
+            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
+
+            switch (opcode) {
+            case 0x03: /* SADDLV / UADDLV */
+            case 0x1b: /* ADDV */
+                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
+                break;
+            case 0x0a: /* SMAXV / UMAXV */
+                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
+                                    tcg_res,
+                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
+                break;
+            case 0x1a: /* SMINV / UMINV */
+                tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
+                                    tcg_res,
+                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+    } else {
+        /* Floating point ops which work on 32 bit (single) intermediates.
+         * Note that correct NaN propagation requires that we do these
+         * operations in exactly the order specified by the pseudocode.
+         */
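+        /* Concretely, for the four-lane case handled here the
+         * reduction tree is res = op(op(elt0, elt1), op(elt2, elt3)),
+         * which the do_minmaxop calls below compute in that order.
+         */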
+        TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
+        TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
+        TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
+        TCGv_ptr fpst = get_fpstatus_ptr();
+
+        assert(esize == 32);
+        assert(elements == 4);
+
+        read_vec_element(s, tcg_elt, rn, 0, MO_32);
+        tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
+        read_vec_element(s, tcg_elt, rn, 1, MO_32);
+        tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+
+        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
+
+        read_vec_element(s, tcg_elt, rn, 2, MO_32);
+        tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+        read_vec_element(s, tcg_elt, rn, 3, MO_32);
+        tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);
+
+        do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
+
+        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
+
+        tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
+        tcg_temp_free_i32(tcg_elt1);
+        tcg_temp_free_i32(tcg_elt2);
+        tcg_temp_free_i32(tcg_elt3);
+        tcg_temp_free_ptr(fpst);
+    }
+
+    tcg_temp_free_i64(tcg_elt);
+
+    /* Now truncate the result to the width required for the final output */
+    if (opcode == 0x03) {
+        /* SADDLV, UADDLV: result is 2*esize */
+        size++;
+    }
+
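+    /* For example, SADDLV with 8-bit source elements enters this switch
+     * with size bumped from 0 to 1, so its 16-bit result is
+     * zero-extended below.
+     */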
+    switch (size) {
+    case 0:
+        tcg_gen_ext8u_i64(tcg_res, tcg_res);
+        break;
+    case 1:
+        tcg_gen_ext16u_i64(tcg_res, tcg_res);
+        break;
+    case 2:
+        tcg_gen_ext32u_i64(tcg_res, tcg_res);
+        break;
+    case 3:
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    write_fp_dreg(s, rd, tcg_res);
+    tcg_temp_free_i64(tcg_res);
 }
 
 /* C3.6.5 AdvSIMD copy
-- 
1.8.5

Thread overview: 30+ messages
2014-01-10 17:12 [Qemu-devel] [PATCH 00/10] A64 SIMD patchset one: ld/st, C3.6.1..C3.6.7 Peter Maydell
2014-01-10 17:12 ` [Qemu-devel] [PATCH 01/10] target-arm: A64: Add SIMD ld/st multiple Peter Maydell
2014-01-10 18:05   ` Richard Henderson
2014-01-10 18:18     ` Peter Maydell
2014-01-10 18:28       ` Richard Henderson
2014-01-10 18:37         ` Peter Maydell
2014-01-10 19:00           ` Richard Henderson
2014-01-10 17:12 ` [Qemu-devel] [PATCH 02/10] target-arm: A64: Add SIMD ld/st single Peter Maydell
2014-01-10 18:12   ` Richard Henderson
2014-01-10 17:12 ` [Qemu-devel] [PATCH 03/10] target-arm: A64: Add decode skeleton for SIMD data processing insns Peter Maydell
2014-01-10 18:55   ` Richard Henderson
2014-01-10 19:05   ` Richard Henderson
2014-01-11  0:01     ` Peter Maydell
2014-01-10 17:12 ` [Qemu-devel] [PATCH 04/10] target-arm: A64: Add SIMD EXT Peter Maydell
2014-01-10 19:13   ` Richard Henderson
2014-01-10 17:12 ` [Qemu-devel] [PATCH 05/10] target-arm: A64: Add SIMD TBL/TBLX Peter Maydell
2014-01-10 19:19   ` Richard Henderson
2014-01-10 17:12 ` [Qemu-devel] [PATCH 06/10] target-arm: A64: Add SIMD ZIP/UZP/TRN Peter Maydell
2014-01-10 19:29   ` Richard Henderson
2014-01-11  8:30     ` Alex Bennée
2014-01-10 17:12 ` Peter Maydell [this message]
2014-01-10 19:38   ` [Qemu-devel] [PATCH 07/10] target-arm: A64: Add SIMD across-lanes instructions Richard Henderson
2014-01-10 17:12 ` [Qemu-devel] [PATCH 08/10] target-arm: A64: Add SIMD copy operations Peter Maydell
2014-01-10 19:50   ` Richard Henderson
2014-01-10 17:12 ` [Qemu-devel] [PATCH 09/10] target-arm: A64: Add SIMD modified immediate group Peter Maydell
2014-01-10 20:00   ` Richard Henderson
2014-01-10 17:12 ` [Qemu-devel] [PATCH 10/10] target-arm: A64: Add SIMD scalar copy instructions Peter Maydell
2014-01-10 20:03   ` Richard Henderson
2014-01-15 15:10   ` Claudio Fontana
2014-01-15 18:01     ` Peter Maydell
