From: Richard Henderson <rth@twiddle.net>
To: qemu-devel@nongnu.org
Cc: claudio.fontana@huawei.com, Richard Henderson <rth@twiddle.net>
Subject: [Qemu-devel] [PATCH v3 07/29] tcg-aarch64: Introduce tcg_fmt_* functions
Date: Mon, 2 Sep 2013 10:54:41 -0700
Message-ID: <1378144503-15808-8-git-send-email-rth@twiddle.net>
In-Reply-To: <1378144503-15808-1-git-send-email-rth@twiddle.net>

Now that we've converted opcode fields to pre-shifted insns, we
can merge the implementation of arithmetic and shift insns.
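
For illustration only, a minimal standalone sketch of the idea, outside
the patch itself.  The INSN_* values here are assumptions taken from the
AArch64 encoding (0x1ac02000 is the 2-source base already quoted in the
old comment being removed below); they are not definitions from this
series:

    /* Sketch: with pre-shifted opcode constants, arithmetic
       (shifted-register) and shift/rotate (2-source) insns share the
       same Rd/Rn/Rm field layout, so a single emitter covers both.  */
    #include <stdint.h>
    #include <stdio.h>

    enum {
        INSN_ADD  = 0x0b000000u,   /* ADD  Wd, Wn, Wm, shifted register   */
        INSN_LSLV = 0x1ac02000u,   /* LSLV Wd, Wn, Wm, 2-source data proc */
    };

    static uint32_t fmt_Rdnm(uint32_t insn, int ext, int rd, int rn, int rm)
    {
        /* sf at bit 31, Rm at bit 16, Rn at bit 5, Rd at bit 0
           in both instruction classes.  */
        return insn | (uint32_t)ext << 31 | rm << 16 | rn << 5 | rd;
    }

    int main(void)
    {
        /* ADD X0, X1, X2 and LSLV X0, X1, X2 take the same path.  */
        printf("add  %08x\n", fmt_Rdnm(INSN_ADD, 1, 0, 1, 2));
        printf("lslv %08x\n", fmt_Rdnm(INSN_LSLV, 1, 0, 1, 2));
        return 0;
    }

That is all tcg_fmt_Rdnm does; tcg_fmt_Rdnm_shift merely folds the
optional shift amount on top of the same layout.
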
Signed-off-by: Richard Henderson <rth@twiddle.net>
---
tcg/aarch64/tcg-target.c | 78 +++++++++++++++++++++++-------------------------
1 file changed, 38 insertions(+), 40 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index de97fbd..e2f3d1c 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -297,6 +297,30 @@ static inline uint32_t tcg_in32(TCGContext *s)
return v;
}
+/*
+ * Encode various formats. Note that since the architecture document is
+ * still private, these names are made up.
+ */
+
+static inline void tcg_fmt_Rdnm(TCGContext *s, AArch64Insn insn, bool ext,
+ TCGReg rd, TCGReg rn, TCGReg rm)
+{
+ tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd);
+}
+
+static inline void tcg_fmt_Rdnm_shift(TCGContext *s, AArch64Insn insn,
+ bool ext, TCGReg rd, TCGReg rn,
+ TCGReg rm, int shift_imm)
+{
+ unsigned int shift;
+ if (shift_imm > 0) {
+ shift = shift_imm << 10 | 1 << 22;
+ } else {
+ shift = (-shift_imm) << 10;
+ }
+ tcg_out32(s, insn | ext << 31 | shift | rm << 16 | rn << 5 | rd);
+}
+
static inline void tcg_out_ldst_9(TCGContext *s,
enum aarch64_ldst_op_data op_data,
enum aarch64_ldst_op_type op_type,
@@ -438,23 +462,6 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
arg, arg1, arg2);
}
-static inline void tcg_out_arith(TCGContext *s, AArch64Insn insn,
- bool ext, TCGReg rd, TCGReg rn, TCGReg rm,
- int shift_imm)
-{
- /* Using shifted register arithmetic operations */
- /* if extended register operation (64bit) just OR with 0x80 << 24 */
- unsigned int shift, base = insn | (ext ? 0x80000000 : 0);
- if (shift_imm == 0) {
- shift = 0;
- } else if (shift_imm > 0) {
- shift = shift_imm << 10 | 1 << 22;
- } else /* (shift_imm < 0) */ {
- shift = (-shift_imm) << 10;
- }
- tcg_out32(s, base | rm << 16 | shift | rn << 5 | rd);
-}
-
static inline void tcg_out_mul(TCGContext *s, bool ext,
TCGReg rd, TCGReg rn, TCGReg rm)
{
@@ -463,15 +470,6 @@ static inline void tcg_out_mul(TCGContext *s, bool ext,
tcg_out32(s, base | rm << 16 | rn << 5 | rd);
}
-static inline void tcg_out_shiftrot_reg(TCGContext *s,
- AArch64Insn insn, bool ext,
- TCGReg rd, TCGReg rn, TCGReg rm)
-{
- /* using 2-source data processing instructions 0x1ac02000 */
- unsigned int base = insn | (ext ? 0x80000000 : 0);
- tcg_out32(s, base | rm << 16 | rn << 5 | rd);
-}
-
static inline void tcg_out_ubfm(TCGContext *s, bool ext, TCGReg rd, TCGReg rn,
unsigned int a, unsigned int b)
{
@@ -539,7 +537,7 @@ static inline void tcg_out_cmp(TCGContext *s, bool ext, TCGReg rn, TCGReg rm,
int shift_imm)
{
/* Using CMP alias SUBS wzr, Wn, Wm */
- tcg_out_arith(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
+ tcg_fmt_Rdnm_shift(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
}
static inline void tcg_out_cset(TCGContext *s, bool ext, TCGReg rd, TCGCond c)
@@ -896,8 +894,8 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg,
tcg_out_addi(s, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
/* Merge the tlb index contribution into X2.
X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
- tcg_out_arith(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
- TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
+ tcg_fmt_Rdnm_shift(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
+ TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
/* Merge "low bits" from tlb offset, load the tlb comparator into X0.
X0 = load [X2 + (tlb_offset & 0x000fff)] */
tcg_out_ldst(s, TARGET_LONG_BITS == 64 ? LDST_64 : LDST_32,
@@ -1173,27 +1171,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_add_i64:
case INDEX_op_add_i32:
- tcg_out_arith(s, INSN_ADD, ext, a0, a1, a2, 0);
+ tcg_fmt_Rdnm(s, INSN_ADD, ext, a0, a1, a2);
break;
case INDEX_op_sub_i64:
case INDEX_op_sub_i32:
- tcg_out_arith(s, INSN_SUB, ext, a0, a1, a2, 0);
+ tcg_fmt_Rdnm(s, INSN_SUB, ext, a0, a1, a2);
break;
case INDEX_op_and_i64:
case INDEX_op_and_i32:
- tcg_out_arith(s, INSN_AND, ext, a0, a1, a2, 0);
+ tcg_fmt_Rdnm(s, INSN_AND, ext, a0, a1, a2);
break;
case INDEX_op_or_i64:
case INDEX_op_or_i32:
- tcg_out_arith(s, INSN_ORR, ext, a0, a1, a2, 0);
+ tcg_fmt_Rdnm(s, INSN_ORR, ext, a0, a1, a2);
break;
case INDEX_op_xor_i64:
case INDEX_op_xor_i32:
- tcg_out_arith(s, INSN_EOR, ext, a0, a1, a2, 0);
+ tcg_fmt_Rdnm(s, INSN_EOR, ext, a0, a1, a2);
break;
case INDEX_op_mul_i64:
@@ -1206,7 +1204,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
if (c2) {
tcg_out_shl(s, ext, a0, a1, a2);
} else {
- tcg_out_shiftrot_reg(s, INSN_LSLV, ext, a0, a1, a2);
+ tcg_fmt_Rdnm(s, INSN_LSLV, ext, a0, a1, a2);
}
break;
@@ -1215,7 +1213,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
if (c2) {
tcg_out_shr(s, ext, a0, a1, a2);
} else {
- tcg_out_shiftrot_reg(s, INSN_LSRV, ext, a0, a1, a2);
+ tcg_fmt_Rdnm(s, INSN_LSRV, ext, a0, a1, a2);
}
break;
@@ -1224,7 +1222,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
if (c2) {
tcg_out_sar(s, ext, a0, a1, a2);
} else {
- tcg_out_shiftrot_reg(s, INSN_ASRV, ext, a0, a1, a2);
+ tcg_fmt_Rdnm(s, INSN_ASRV, ext, a0, a1, a2);
}
break;
@@ -1233,7 +1231,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
if (c2) {
tcg_out_rotr(s, ext, a0, a1, a2);
} else {
- tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, a2);
+ tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, a2);
}
break;
@@ -1242,8 +1240,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
if (c2) {
tcg_out_rotl(s, ext, a0, a1, a2);
} else {
- tcg_out_arith(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2, 0);
- tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
+ tcg_fmt_Rdnm(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
+ tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
}
break;
--
1.8.3.1