From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, qemu-arm@nongnu.org, alex.bennee@linaro.org
Subject: [Qemu-devel] [PATCH 29/67] target/arm: Convert LDM, STM
Date: Fri, 26 Jul 2019 10:49:54 -0700 [thread overview]
Message-ID: <20190726175032.6769-30-richard.henderson@linaro.org> (raw)
In-Reply-To: <20190726175032.6769-1-richard.henderson@linaro.org>
While unifying all of these paths, remove the constrained unpredictable
test for "wback && registers<n> == 1" from the T2 encoding; that case is
not constrained unpredictable for the A1 encoding, and taking the A1
behaviour is an allowed behaviour for the T2 encoding.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
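Reviewer note (not part of the commit message): the dropped T2-only check
is the one that previously made an encoding such as

    ldmia   r0!, {r0, r2}

(writeback with the base register in the register list) take the
illegal_op/UNDEF path in disas_thumb2_insn.  After this patch it is
decoded like the A1 form: in trans_LDM the value loaded into the base
register wins, because loaded_var is stored back after
op_addr_block_post().  The instruction above is only an illustrative
example, not something added by the patch.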
target/arm/translate.c | 388 ++++++++++++++++++++---------------------
target/arm/a32.decode | 6 +
target/arm/t32.decode | 10 ++
3 files changed, 206 insertions(+), 198 deletions(-)
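Also not part of the patch: a minimal standalone sketch (plain C,
hypothetical helper names) of the address arithmetic that the new
op_addr_block_pre()/op_addr_block_post() below emit for the four
addressing modes, in case it helps when reviewing the b/i/w handling:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Lowest address accessed: mirrors op_addr_block_pre(). */
static uint32_t block_start(uint32_t rn, int n, bool i, bool b)
{
    if (b) {
        return i ? rn + 4 : rn - n * 4;      /* IB / DB */
    }
    return i ? rn : rn - (n - 1) * 4;        /* IA / DA */
}

/* Written-back base: mirrors op_addr_block_post() with a->w set. */
static uint32_t block_wback(uint32_t rn, int n, bool i)
{
    return i ? rn + n * 4 : rn - n * 4;      /* increment vs decrement */
}

int main(void)
{
    /* e.g. STMDB sp!, {r4-r7, lr}: n = 5, i = 0, b = 1 */
    uint32_t sp = 0x20001000;
    printf("first store at 0x%08" PRIx32 ", sp after 0x%08" PRIx32 "\n",
           block_start(sp, 5, false, true), block_wback(sp, 5, false));
    return 0;
}

(For n == 1 the "n != 1" special cases in the real code reduce to the
same values as the formulas above.)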
diff --git a/target/arm/translate.c b/target/arm/translate.c
index ee97a0ccdd..33daf70d5d 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9742,6 +9742,192 @@ static bool trans_UDIV(DisasContext *s, arg_rrr *a)
return op_div(s, a, true);
}
+/*
+ * Block data transfer
+ */
+
+static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
+{
+ TCGv_i32 addr = load_reg(s, a->rn);
+
+ if (a->b) {
+ if (a->i) {
+ /* pre increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ } else if (!a->i && n != 1) {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ /*
+ * If the writeback is incrementing SP rather than
+ * decrementing it, and the initial SP is below the
+ * stack limit but the final written-back SP would
+ * be above, then we must not perform any memory
+ * accesses, but it is IMPDEF whether we generate
+ * an exception. We choose to do so in this case.
+ * At this point 'addr' is the lowest address, so
+ * either the original SP (if incrementing) or our
+ * final SP (if decrementing), so that's what we check.
+ */
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ return addr;
+}
+
+static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
+ TCGv_i32 addr, int n)
+{
+ if (a->w) {
+ /* write back */
+ if (!a->b) {
+ if (a->i) {
+ /* post increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ } else if (!a->i && n != 1) {
+ /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+}
+
+static bool trans_STM(DisasContext *s, arg_ldst_block *a)
+{
+ int i, j, n, list, mem_idx;
+ TCGv_i32 addr, tmp, tmp2;
+ bool user = a->u;
+
+ if (user) {
+ /* STM (user) */
+ if (IS_USER(s)) {
+ /* Only usable in supervisor mode. */
+ return false;
+ }
+ }
+
+ /* compute total size */
+ list = a->list;
+ n = ctpop16(list);
+ mem_idx = get_mem_index(s);
+
+ addr = op_addr_block_pre(s, a, n);
+ for (i = j = 0; i < 16; i++) {
+ if (((list >> i) & 1) == 0) {
+ continue;
+ }
+ if (user && i != 15) {
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_const_i32(i);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
+ tcg_temp_free_i32(tmp2);
+ } else {
+ tmp = load_reg(s, i);
+ }
+ gen_aa32_st32(s, tmp, addr, mem_idx);
+ tcg_temp_free_i32(tmp);
+ /* No need to add after the last transfer. */
+ if (++j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+
+ op_addr_block_post(s, a, addr, n);
+ return true;
+}
+
+static bool trans_LDM(DisasContext *s, arg_ldst_block *a)
+{
+ int i, j, n, mem_idx, loaded_base;
+ int list = a->list;
+ bool user = a->u;
+ bool exc_return = false;
+ TCGv_i32 addr, tmp, tmp2, loaded_var;
+
+ if (user) {
+ /* LDM (user), LDM (exception return) */
+ if (IS_USER(s)) {
+ /* Only usable in supervisor mode. */
+ return false;
+ }
+ if (extract32(a->list, 15, 1)) {
+ exc_return = true;
+ user = false;
+ } else {
+ /* LDM (User) does not allow W set. */
+ if (a->w) {
+ return false;
+ }
+ }
+ }
+
+ /* compute total size */
+ loaded_base = 0;
+ loaded_var = NULL;
+ n = ctpop16(list);
+ mem_idx = get_mem_index(s);
+
+ addr = op_addr_block_pre(s, a, n);
+ for (i = j = 0; i < 16; i++) {
+ if (((list >> i) & 1) == 0) {
+ continue;
+ }
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, mem_idx);
+ if (user) {
+ tmp2 = tcg_const_i32(i);
+ gen_helper_set_user_reg(cpu_env, tmp2, tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ } else if (i == a->rn) {
+ loaded_var = tmp;
+ loaded_base = 1;
+ } else if (i == 15 && exc_return) {
+ store_pc_exc_ret(s, tmp);
+ } else {
+ store_reg_from_load(s, i, tmp);
+ }
+ /* No need to add after the last transfer. */
+ if (++j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+
+ op_addr_block_post(s, a, addr, n);
+
+ if (loaded_base) {
+ store_reg(s, a->rn, loaded_var);
+ }
+
+ if (exc_return) {
+ /* Restore CPSR from SPSR. */
+ tmp = load_cpu_field(spsr);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_cpsr_write_eret(cpu_env, tmp);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+ tcg_temp_free_i32(tmp);
+ /* Must exit loop to check un-masked IRQs */
+ s->base.is_jmp = DISAS_EXIT;
+ }
+ return true;
+}
+
/*
* Legacy decoder.
*/
@@ -10017,139 +10203,10 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
case 0x5:
case 0x6:
case 0x7:
- /* All done in decodetree. Reach here for illegal ops. */
- goto illegal_op;
case 0x08:
case 0x09:
- {
- int j, n, loaded_base;
- bool exc_return = false;
- bool is_load = extract32(insn, 20, 1);
- bool user = false;
- TCGv_i32 loaded_var;
- /* load/store multiple words */
- /* XXX: store correct base if write back */
- if (insn & (1 << 22)) {
- /* LDM (user), LDM (exception return) and STM (user) */
- if (IS_USER(s))
- goto illegal_op; /* only usable in supervisor mode */
-
- if (is_load && extract32(insn, 15, 1)) {
- exc_return = true;
- } else {
- user = true;
- }
- }
- rn = (insn >> 16) & 0xf;
- addr = load_reg(s, rn);
-
- /* compute total size */
- loaded_base = 0;
- loaded_var = NULL;
- n = 0;
- for (i = 0; i < 16; i++) {
- if (insn & (1 << i))
- n++;
- }
- /* XXX: test invalid n == 0 case ? */
- if (insn & (1 << 23)) {
- if (insn & (1 << 24)) {
- /* pre increment */
- tcg_gen_addi_i32(addr, addr, 4);
- } else {
- /* post increment */
- }
- } else {
- if (insn & (1 << 24)) {
- /* pre decrement */
- tcg_gen_addi_i32(addr, addr, -(n * 4));
- } else {
- /* post decrement */
- if (n != 1)
- tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
- }
- }
- j = 0;
- for (i = 0; i < 16; i++) {
- if (insn & (1 << i)) {
- if (is_load) {
- /* load */
- tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- if (user) {
- tmp2 = tcg_const_i32(i);
- gen_helper_set_user_reg(cpu_env, tmp2, tmp);
- tcg_temp_free_i32(tmp2);
- tcg_temp_free_i32(tmp);
- } else if (i == rn) {
- loaded_var = tmp;
- loaded_base = 1;
- } else if (i == 15 && exc_return) {
- store_pc_exc_ret(s, tmp);
- } else {
- store_reg_from_load(s, i, tmp);
- }
- } else {
- /* store */
- if (user && i != 15) {
- tmp = tcg_temp_new_i32();
- tmp2 = tcg_const_i32(i);
- gen_helper_get_user_reg(tmp, cpu_env, tmp2);
- tcg_temp_free_i32(tmp2);
- } else {
- tmp = load_reg(s, i);
- }
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
- tcg_temp_free_i32(tmp);
- }
- j++;
- /* no need to add after the last transfer */
- if (j != n)
- tcg_gen_addi_i32(addr, addr, 4);
- }
- }
- if (insn & (1 << 21)) {
- /* write back */
- if (insn & (1 << 23)) {
- if (insn & (1 << 24)) {
- /* pre increment */
- } else {
- /* post increment */
- tcg_gen_addi_i32(addr, addr, 4);
- }
- } else {
- if (insn & (1 << 24)) {
- /* pre decrement */
- if (n != 1)
- tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
- } else {
- /* post decrement */
- tcg_gen_addi_i32(addr, addr, -(n * 4));
- }
- }
- store_reg(s, rn, addr);
- } else {
- tcg_temp_free_i32(addr);
- }
- if (loaded_base) {
- store_reg(s, rn, loaded_var);
- }
- if (exc_return) {
- /* Restore CPSR from SPSR. */
- tmp = load_cpu_field(spsr);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
- gen_helper_cpsr_write_eret(cpu_env, tmp);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
- tcg_temp_free_i32(tmp);
- /* Must exit loop to check un-masked IRQs */
- s->base.is_jmp = DISAS_EXIT;
- }
- }
- break;
+ /* All done in decodetree. Reach here for illegal ops. */
+ goto illegal_op;
case 0xa:
case 0xb:
{
@@ -10422,73 +10479,8 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
insn & (1 << 21));
}
} else {
- int i, loaded_base = 0;
- TCGv_i32 loaded_var;
- bool wback = extract32(insn, 21, 1);
- /* Load/store multiple. */
- addr = load_reg(s, rn);
- offset = 0;
- for (i = 0; i < 16; i++) {
- if (insn & (1 << i))
- offset += 4;
- }
-
- if (insn & (1 << 24)) {
- tcg_gen_addi_i32(addr, addr, -offset);
- }
-
- if (s->v8m_stackcheck && rn == 13 && wback) {
- /*
- * If the writeback is incrementing SP rather than
- * decrementing it, and the initial SP is below the
- * stack limit but the final written-back SP would
- * be above, then then we must not perform any memory
- * accesses, but it is IMPDEF whether we generate
- * an exception. We choose to do so in this case.
- * At this point 'addr' is the lowest address, so
- * either the original SP (if incrementing) or our
- * final SP (if decrementing), so that's what we check.
- */
- gen_helper_v8m_stackcheck(cpu_env, addr);
- }
-
- loaded_var = NULL;
- for (i = 0; i < 16; i++) {
- if ((insn & (1 << i)) == 0)
- continue;
- if (insn & (1 << 20)) {
- /* Load. */
- tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- if (i == rn) {
- loaded_var = tmp;
- loaded_base = 1;
- } else {
- store_reg_from_load(s, i, tmp);
- }
- } else {
- /* Store. */
- tmp = load_reg(s, i);
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
- tcg_temp_free_i32(tmp);
- }
- tcg_gen_addi_i32(addr, addr, 4);
- }
- if (loaded_base) {
- store_reg(s, rn, loaded_var);
- }
- if (wback) {
- /* Base register writeback. */
- if (insn & (1 << 24)) {
- tcg_gen_addi_i32(addr, addr, -offset);
- }
- /* Fault if writeback register is in register list. */
- if (insn & (1 << rn))
- goto illegal_op;
- store_reg(s, rn, addr);
- } else {
- tcg_temp_free_i32(addr);
- }
+ /* Load/store multiple, in decodetree */
+ goto illegal_op;
}
}
break;
diff --git a/target/arm/a32.decode b/target/arm/a32.decode
index 02d7e5b202..96c47aaf2a 100644
--- a/target/arm/a32.decode
+++ b/target/arm/a32.decode
@@ -40,6 +40,7 @@
&mrs_bank rd r sysm
&ldst_rr p w u rn rt rm shimm shtype
&ldst_ri p w u rn rt imm
+&ldst_block rn i b u w list
&strex rn rd rt rt2 imm
&ldrex rn rt rt2 imm
&bfx rd rn lsb widthm1
@@ -514,3 +515,8 @@ SMMLA .... 0111 0101 .... .... .... 0001 .... @rdamn
SMMLAR .... 0111 0101 .... .... .... 0011 .... @rdamn
SMMLS .... 0111 0101 .... .... .... 1101 .... @rdamn
SMMLSR .... 0111 0101 .... .... .... 1111 .... @rdamn
+
+# Block data transfer
+
+STM ---- 100 b:1 i:1 u:1 w:1 0 rn:4 list:16 &ldst_block
+LDM ---- 100 b:1 i:1 u:1 w:1 1 rn:4 list:16 &ldst_block
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index fdcfb60cc5..3ab82c0962 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -37,6 +37,7 @@
&mrs_bank !extern rd r sysm
&ldst_rr !extern p w u rn rt rm shimm shtype
&ldst_ri !extern p w u rn rt imm
+&ldst_block !extern rn i b u w list
&strex !extern rn rd rt rt2 imm
&ldrex !extern rn rt rt2 imm
&bfx !extern rd rn lsb widthm1
@@ -563,3 +564,12 @@ SXTAB16 1111 1010 0010 .... 1111 .... 10.. .... @rrr_rot
UXTAB16 1111 1010 0011 .... 1111 .... 10.. .... @rrr_rot
SXTAB 1111 1010 0100 .... 1111 .... 10.. .... @rrr_rot
UXTAB 1111 1010 0101 .... 1111 .... 10.. .... @rrr_rot
+
+# Load/store multiple
+
+@ldstm .... .... .. w:1 . rn:4 list:16 &ldst_block u=0
+
+STM 1110 1000 10.0 .... ................ @ldstm i=1 b=0
+STM 1110 1001 00.0 .... ................ @ldstm i=0 b=1
+LDM 1110 1000 10.1 .... ................ @ldstm i=1 b=0
+LDM 1110 1001 00.1 .... ................ @ldstm i=0 b=1
--
2.17.1