* [Qemu-devel] [PATCH 14/19] s390x: Prepare cpu.h for emulation
2011-03-29 13:29 [Qemu-devel] [PATCH 00/19] s390x emulation support Alexander Graf
` (12 preceding siblings ...)
2011-03-29 13:29 ` [Qemu-devel] [PATCH 13/19] s390x: virtio machine storage keys Alexander Graf
@ 2011-03-29 13:29 ` Alexander Graf
2011-03-29 13:29 ` [Qemu-devel] [PATCH 15/19] s390x: helper functions for system emulation Alexander Graf
` (6 subsequent siblings)
20 siblings, 0 replies; 29+ messages in thread
From: Alexander Graf @ 2011-03-29 13:29 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, aurelien, rth
We need to add some more logic to the CPU description to enable emulation
of an s390x CPU. This patch adds all the required helpers, fields in CPUState
and constant definitions required for user and system emulation.
Signed-off-by: Alexander Graf <agraf@suse.de>
---
v1 -> v2:
- remove FPReg definition
- remove EXCP_EXECUTE_SVC in non user-mode
- add descriptions to more cc_ops
- add comment on time2tod
- remove redundant EXECUTE_SVC
- describe EXCP_EXT
---
target-s390x/cpu.h | 770 +++++++++++++++++++++++++++++++++++++++++++++++++---
1 files changed, 726 insertions(+), 44 deletions(-)
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index e47c372..0d9c4f2 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -26,24 +26,35 @@
#define CPUState struct CPUS390XState
#include "cpu-defs.h"
+#define TARGET_PAGE_BITS 12
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+
+#include "cpu-all.h"
#include "softfloat.h"
-#define NB_MMU_MODES 2
+#define NB_MMU_MODES 3
-typedef union FPReg {
- struct {
-#ifdef WORDS_BIGENDIAN
- float32 e;
- int32_t __pad;
-#else
- int32_t __pad;
- float32 e;
-#endif
- };
- float64 d;
- uint64_t i;
-} FPReg;
+#define MMU_MODE0_SUFFIX _primary
+#define MMU_MODE1_SUFFIX _secondary
+#define MMU_MODE2_SUFFIX _home
+
+#define MMU_USER_IDX 1
+
+#define MAX_EXT_QUEUE 16
+
+typedef struct PSW {
+ uint64_t mask;
+ uint64_t addr;
+} PSW;
+
+typedef struct ExtQueue {
+ uint32_t code;
+ uint32_t param;
+ uint32_t param64;
+} ExtQueue;
typedef struct CPUS390XState {
uint64_t regs[16]; /* GP registers */
@@ -51,17 +62,41 @@ typedef struct CPUS390XState {
uint32_t aregs[16]; /* access registers */
uint32_t fpc; /* floating-point control register */
- FPReg fregs[16]; /* FP registers */
+ CPU_DoubleU fregs[16]; /* FP registers */
float_status fpu_status; /* passed to softfloat lib */
- struct {
- uint64_t mask;
- uint64_t addr;
- } psw;
+ PSW psw;
- int cc; /* condition code (0-3) */
+ uint32_t cc_op;
+ uint64_t cc_src;
+ uint64_t cc_dst;
+ uint64_t cc_vr;
uint64_t __excp_addr;
+ uint64_t psa;
+
+ uint32_t int_pgm_code;
+ uint32_t int_pgm_ilc;
+
+ uint32_t int_svc_code;
+ uint32_t int_svc_ilc;
+
+ uint64_t cregs[16]; /* control registers */
+
+ int pending_int;
+ ExtQueue ext_queue[MAX_EXT_QUEUE];
+
+ /* reset does memset(0) up to here */
+
+ int ext_index;
+ int cpu_num;
+ uint8_t *storage_keys;
+
+ uint64_t tod_offset;
+ uint64_t tod_basetime;
+ QEMUTimer *tod_timer;
+
+ QEMUTimer *cpu_timer;
CPU_COMMON
} CPUS390XState;
@@ -69,24 +104,174 @@ typedef struct CPUS390XState {
#if defined(CONFIG_USER_ONLY)
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
- if (newsp)
+ if (newsp) {
env->regs[15] = newsp;
+ }
env->regs[0] = 0;
}
#endif
-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _user
-#define MMU_USER_IDX 1
+/* Interrupt Codes */
+/* Program Interrupts */
+#define PGM_OPERATION 0x0001
+#define PGM_PRIVILEGED 0x0002
+#define PGM_EXECUTE 0x0003
+#define PGM_PROTECTION 0x0004
+#define PGM_ADDRESSING 0x0005
+#define PGM_SPECIFICATION 0x0006
+#define PGM_DATA 0x0007
+#define PGM_FIXPT_OVERFLOW 0x0008
+#define PGM_FIXPT_DIVIDE 0x0009
+#define PGM_DEC_OVERFLOW 0x000a
+#define PGM_DEC_DIVIDE 0x000b
+#define PGM_HFP_EXP_OVERFLOW 0x000c
+#define PGM_HFP_EXP_UNDERFLOW 0x000d
+#define PGM_HFP_SIGNIFICANCE 0x000e
+#define PGM_HFP_DIVIDE 0x000f
+#define PGM_SEGMENT_TRANS 0x0010
+#define PGM_PAGE_TRANS 0x0011
+#define PGM_TRANS_SPEC 0x0012
+#define PGM_SPECIAL_OP 0x0013
+#define PGM_OPERAND 0x0015
+#define PGM_TRACE_TABLE 0x0016
+#define PGM_SPACE_SWITCH 0x001c
+#define PGM_HFP_SQRT 0x001d
+#define PGM_PC_TRANS_SPEC 0x001f
+#define PGM_AFX_TRANS 0x0020
+#define PGM_ASX_TRANS 0x0021
+#define PGM_LX_TRANS 0x0022
+#define PGM_EX_TRANS 0x0023
+#define PGM_PRIM_AUTH 0x0024
+#define PGM_SEC_AUTH 0x0025
+#define PGM_ALET_SPEC 0x0028
+#define PGM_ALEN_SPEC 0x0029
+#define PGM_ALE_SEQ 0x002a
+#define PGM_ASTE_VALID 0x002b
+#define PGM_ASTE_SEQ 0x002c
+#define PGM_EXT_AUTH 0x002d
+#define PGM_STACK_FULL 0x0030
+#define PGM_STACK_EMPTY 0x0031
+#define PGM_STACK_SPEC 0x0032
+#define PGM_STACK_TYPE 0x0033
+#define PGM_STACK_OP 0x0034
+#define PGM_ASCE_TYPE 0x0038
+#define PGM_REG_FIRST_TRANS 0x0039
+#define PGM_REG_SEC_TRANS 0x003a
+#define PGM_REG_THIRD_TRANS 0x003b
+#define PGM_MONITOR 0x0040
+#define PGM_PER 0x0080
+#define PGM_CRYPTO 0x0119
+
+/* External Interrupts */
+#define EXT_INTERRUPT_KEY 0x0040
+#define EXT_CLOCK_COMP 0x1004
+#define EXT_CPU_TIMER 0x1005
+#define EXT_MALFUNCTION 0x1200
+#define EXT_EMERGENCY 0x1201
+#define EXT_EXTERNAL_CALL 0x1202
+#define EXT_ETR 0x1406
+#define EXT_SERVICE 0x2401
+#define EXT_VIRTIO 0x2603
+
+/* PSW defines */
+#undef PSW_MASK_PER
+#undef PSW_MASK_DAT
+#undef PSW_MASK_IO
+#undef PSW_MASK_EXT
+#undef PSW_MASK_KEY
+#undef PSW_SHIFT_KEY
+#undef PSW_MASK_MCHECK
+#undef PSW_MASK_WAIT
+#undef PSW_MASK_PSTATE
+#undef PSW_MASK_ASC
+#undef PSW_MASK_CC
+#undef PSW_MASK_PM
+#undef PSW_MASK_64
+
+#define PSW_MASK_PER 0x4000000000000000ULL
+#define PSW_MASK_DAT 0x0400000000000000ULL
+#define PSW_MASK_IO 0x0200000000000000ULL
+#define PSW_MASK_EXT 0x0100000000000000ULL
+#define PSW_MASK_KEY 0x00F0000000000000ULL
+#define PSW_SHIFT_KEY 56
+#define PSW_MASK_MCHECK 0x0004000000000000ULL
+#define PSW_MASK_WAIT 0x0002000000000000ULL
+#define PSW_MASK_PSTATE 0x0001000000000000ULL
+#define PSW_MASK_ASC 0x0000C00000000000ULL
+#define PSW_MASK_CC 0x0000300000000000ULL
+#define PSW_MASK_PM 0x00000F0000000000ULL
+#define PSW_MASK_64 0x0000000100000000ULL
+#define PSW_MASK_32 0x0000000080000000ULL
+
+#undef PSW_ASC_PRIMARY
+#undef PSW_ASC_ACCREG
+#undef PSW_ASC_SECONDARY
+#undef PSW_ASC_HOME
+
+#define PSW_ASC_PRIMARY 0x0000000000000000ULL
+#define PSW_ASC_ACCREG 0x0000400000000000ULL
+#define PSW_ASC_SECONDARY 0x0000800000000000ULL
+#define PSW_ASC_HOME 0x0000C00000000000ULL
+
+/* tb flags */
+
+#define FLAG_MASK_PER (PSW_MASK_PER >> 32)
+#define FLAG_MASK_DAT (PSW_MASK_DAT >> 32)
+#define FLAG_MASK_IO (PSW_MASK_IO >> 32)
+#define FLAG_MASK_EXT (PSW_MASK_EXT >> 32)
+#define FLAG_MASK_KEY (PSW_MASK_KEY >> 32)
+#define FLAG_MASK_MCHECK (PSW_MASK_MCHECK >> 32)
+#define FLAG_MASK_WAIT (PSW_MASK_WAIT >> 32)
+#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> 32)
+#define FLAG_MASK_ASC (PSW_MASK_ASC >> 32)
+#define FLAG_MASK_CC (PSW_MASK_CC >> 32)
+#define FLAG_MASK_PM (PSW_MASK_PM >> 32)
+#define FLAG_MASK_64 (PSW_MASK_64 >> 32)
+#define FLAG_MASK_32 0x00001000
+
static inline int cpu_mmu_index (CPUState *env)
{
- /* XXX: Currently we don't implement virtual memory */
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ return 1;
+ }
+
return 0;
}
+static inline void cpu_get_tb_cpu_state(CPUState* env, target_ulong *pc,
+ target_ulong *cs_base, int *flags)
+{
+ *pc = env->psw.addr;
+ *cs_base = 0;
+ *flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
+ ((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
+}
+
+static inline int get_ilc(uint8_t opc)
+{
+ switch (opc >> 6) {
+ case 0:
+ return 1;
+ case 1:
+ case 2:
+ return 2;
+ case 3:
+ return 3;
+ }
+
+ return 0;
+}
+
+#define ILC_LATER 0x20
+#define ILC_LATER_INC 0x21
+#define ILC_LATER_INC_2 0x22
+
+
CPUS390XState *cpu_s390x_init(const char *cpu_model);
+void s390x_translate_init(void);
int cpu_s390x_exec(CPUS390XState *s);
void cpu_s390x_close(CPUS390XState *s);
+void do_interrupt (CPUState *env);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
@@ -97,41 +282,60 @@ int cpu_s390x_handle_mmu_fault (CPUS390XState *env, target_ulong address, int rw
int mmu_idx, int is_softmuu);
#define cpu_handle_mmu_fault cpu_s390x_handle_mmu_fault
-#define TARGET_PAGE_BITS 12
-
-/* ??? This is certainly wrong for 64-bit s390x, but given that only KVM
- emulation actually works, this is good enough for a placeholder. */
-#define TARGET_PHYS_ADDR_SPACE_BITS 32
-#define TARGET_VIRT_ADDR_SPACE_BITS 32
#ifndef CONFIG_USER_ONLY
-int s390_virtio_hypercall(CPUState *env);
+int s390_virtio_hypercall(CPUState *env, uint64_t mem, uint64_t hypercall);
+void kvm_s390_interrupt(CPUState *env, int type, uint32_t code);
void kvm_s390_virtio_irq(CPUState *env, int config_change, uint64_t token);
+void kvm_s390_interrupt_internal(CPUState *env, int type, uint32_t parm,
+ uint64_t parm64, int vm);
CPUState *s390_cpu_addr2state(uint16_t cpu_addr);
+
+#ifndef KVM_S390_SIGP_STOP
+#define KVM_S390_SIGP_STOP 0
+#define KVM_S390_PROGRAM_INT 0
+#define KVM_S390_SIGP_SET_PREFIX 0
+#define KVM_S390_RESTART 0
+#define KVM_S390_INT_VIRTIO 0
+#define KVM_S390_INT_SERVICE 0
+#define KVM_S390_INT_EMERGENCY 0
+#endif
+
#endif
+void cpu_lock(void);
+void cpu_unlock(void);
+static inline void cpu_set_tls(CPUS390XState *env, target_ulong newtls)
+{
+ env->aregs[0] = newtls >> 32;
+ env->aregs[1] = newtls & 0xffffffffULL;
+}
#define cpu_init cpu_s390x_init
#define cpu_exec cpu_s390x_exec
#define cpu_gen_code cpu_s390x_gen_code
+#define cpu_signal_handler cpu_s390x_signal_handler
-#include "cpu-all.h"
+#include "exec-all.h"
+
+#ifdef CONFIG_USER_ONLY
#define EXCP_OPEX 1 /* operation exception (sigill) */
#define EXCP_SVC 2 /* supervisor call (syscall) */
#define EXCP_ADDR 5 /* addressing exception */
-#define EXCP_EXECUTE_SVC 0xff00000 /* supervisor call via execute insn */
+#define EXCP_SPEC 6 /* specification exception */
-static inline void cpu_get_tb_cpu_state(CPUState* env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
-{
- *pc = env->psw.addr;
- /* XXX this is correct for user-mode emulation, but needs
- * the asce register information as well when softmmu
- * is implemented in the future */
- *cs_base = 0;
- *flags = env->psw.mask;
-}
+#else
+
+#define EXCP_EXT 1 /* external interrupt */
+#define EXCP_SVC 2 /* supervisor call (syscall) */
+#define EXCP_PGM 3 /* program interruption */
+
+#endif /* CONFIG_USER_ONLY */
+
+#define INTERRUPT_EXT (1 << 0)
+#define INTERRUPT_TOD (1 << 1)
+#define INTERRUPT_CPUTIMER (1 << 2)
/* Program Status Word. */
#define S390_PSWM_REGNUM 0
@@ -265,5 +469,483 @@ static inline void cpu_get_tb_cpu_state(CPUState* env, target_ulong *pc,
#define S390_NUM_PSEUDO_REGS 2
#define S390_NUM_TOTAL_REGS (S390_NUM_REGS+2)
+/* CC optimization */
+
+enum cc_op {
+ CC_OP_CONST0 = 0, /* CC is 0 */
+ CC_OP_CONST1, /* CC is 1 */
+ CC_OP_CONST2, /* CC is 2 */
+ CC_OP_CONST3, /* CC is 3 */
+
+ CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */
+ CC_OP_STATIC, /* CC value is env->cc_op */
+
+ CC_OP_NZ, /* env->cc_dst != 0 */
+ CC_OP_LTGT_32, /* signed less/greater than (32bit) */
+ CC_OP_LTGT_64, /* signed less/greater than (64bit) */
+ CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */
+ CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */
+ CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */
+ CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */
+
+ CC_OP_ADD_64, /* overflow on add (64bit) */
+ CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */
+ CC_OP_SUB_64, /* overflow on subtraction (64bit) */
+ CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */
+ CC_OP_ABS_64, /* sign eval on abs (64bit) */
+ CC_OP_NABS_64, /* sign eval on nabs (64bit) */
+
+ CC_OP_ADD_32, /* overflow on add (32bit) */
+ CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */
+ CC_OP_SUB_32, /* overflow on subtraction (32bit) */
+ CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */
+ CC_OP_ABS_32, /* sign eval on abs (32bit) */
+ CC_OP_NABS_32, /* sign eval on nabs (32bit) */
+
+ CC_OP_COMP_32, /* complement */
+ CC_OP_COMP_64, /* complement */
+
+ CC_OP_TM_32, /* test under mask (32bit) */
+ CC_OP_TM_64, /* test under mask (64bit) */
+
+ CC_OP_LTGT_F32, /* FP compare (32bit) */
+ CC_OP_LTGT_F64, /* FP compare (64bit) */
+
+ CC_OP_NZ_F32, /* FP dst != 0 (32bit) */
+ CC_OP_NZ_F64, /* FP dst != 0 (64bit) */
+
+ CC_OP_ICM, /* insert characters under mask */
+ CC_OP_MAX
+};
+
+static const char *cc_names[] = {
+ [CC_OP_CONST0] = "CC_OP_CONST0",
+ [CC_OP_CONST1] = "CC_OP_CONST1",
+ [CC_OP_CONST2] = "CC_OP_CONST2",
+ [CC_OP_CONST3] = "CC_OP_CONST3",
+ [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
+ [CC_OP_STATIC] = "CC_OP_STATIC",
+ [CC_OP_NZ] = "CC_OP_NZ",
+ [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
+ [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
+ [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
+ [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
+ [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
+ [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
+ [CC_OP_ADD_64] = "CC_OP_ADD_64",
+ [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
+ [CC_OP_SUB_64] = "CC_OP_SUB_64",
+ [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
+ [CC_OP_ABS_64] = "CC_OP_ABS_64",
+ [CC_OP_NABS_64] = "CC_OP_NABS_64",
+ [CC_OP_ADD_32] = "CC_OP_ADD_32",
+ [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
+ [CC_OP_SUB_32] = "CC_OP_SUB_32",
+ [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
+ [CC_OP_ABS_32] = "CC_OP_ABS_32",
+ [CC_OP_NABS_32] = "CC_OP_NABS_32",
+ [CC_OP_COMP_32] = "CC_OP_COMP_32",
+ [CC_OP_COMP_64] = "CC_OP_COMP_64",
+ [CC_OP_TM_32] = "CC_OP_TM_32",
+ [CC_OP_TM_64] = "CC_OP_TM_64",
+ [CC_OP_LTGT_F32] = "CC_OP_LTGT_F32",
+ [CC_OP_LTGT_F64] = "CC_OP_LTGT_F64",
+ [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
+ [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
+ [CC_OP_ICM] = "CC_OP_ICM",
+};
+
+static inline const char *cc_name(int cc_op)
+{
+ return cc_names[cc_op];
+}
+
+/* SCLP PV interface defines */
+#define SCLP_CMDW_READ_SCP_INFO 0x00020001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
+
+#define SCP_LENGTH 0x00
+#define SCP_FUNCTION_CODE 0x02
+#define SCP_CONTROL_MASK 0x03
+#define SCP_RESPONSE_CODE 0x06
+#define SCP_MEM_CODE 0x08
+#define SCP_INCREMENT 0x0a
+
+typedef struct LowCore
+{
+ /* prefix area: defined by architecture */
+ uint32_t ccw1[2]; /* 0x000 */
+ uint32_t ccw2[4]; /* 0x008 */
+ uint8_t pad1[0x80-0x18]; /* 0x018 */
+ uint32_t ext_params; /* 0x080 */
+ uint16_t cpu_addr; /* 0x084 */
+ uint16_t ext_int_code; /* 0x086 */
+ uint16_t svc_ilc; /* 0x088 */
+ uint16_t svc_code; /* 0x08a */
+ uint16_t pgm_ilc; /* 0x08c */
+ uint16_t pgm_code; /* 0x08e */
+ uint32_t data_exc_code; /* 0x090 */
+ uint16_t mon_class_num; /* 0x094 */
+ uint16_t per_perc_atmid; /* 0x096 */
+ uint64_t per_address; /* 0x098 */
+ uint8_t exc_access_id; /* 0x0a0 */
+ uint8_t per_access_id; /* 0x0a1 */
+ uint8_t op_access_id; /* 0x0a2 */
+ uint8_t ar_access_id; /* 0x0a3 */
+ uint8_t pad2[0xA8-0xA4]; /* 0x0a4 */
+ uint64_t trans_exc_code; /* 0x0a8 */
+ uint64_t monitor_code; /* 0x0b0 */
+ uint16_t subchannel_id; /* 0x0b8 */
+ uint16_t subchannel_nr; /* 0x0ba */
+ uint32_t io_int_parm; /* 0x0bc */
+ uint32_t io_int_word; /* 0x0c0 */
+ uint8_t pad3[0xc8-0xc4]; /* 0x0c4 */
+ uint32_t stfl_fac_list; /* 0x0c8 */
+ uint8_t pad4[0xe8-0xcc]; /* 0x0cc */
+ uint32_t mcck_interruption_code[2]; /* 0x0e8 */
+ uint8_t pad5[0xf4-0xf0]; /* 0x0f0 */
+ uint32_t external_damage_code; /* 0x0f4 */
+ uint64_t failing_storage_address; /* 0x0f8 */
+ uint8_t pad6[0x120-0x100]; /* 0x100 */
+ PSW restart_old_psw; /* 0x120 */
+ PSW external_old_psw; /* 0x130 */
+ PSW svc_old_psw; /* 0x140 */
+ PSW program_old_psw; /* 0x150 */
+ PSW mcck_old_psw; /* 0x160 */
+ PSW io_old_psw; /* 0x170 */
+ uint8_t pad7[0x1a0-0x180]; /* 0x180 */
+ PSW restart_psw; /* 0x1a0 */
+ PSW external_new_psw; /* 0x1b0 */
+ PSW svc_new_psw; /* 0x1c0 */
+ PSW program_new_psw; /* 0x1d0 */
+ PSW mcck_new_psw; /* 0x1e0 */
+ PSW io_new_psw; /* 0x1f0 */
+ PSW return_psw; /* 0x200 */
+ uint8_t irb[64]; /* 0x210 */
+ uint64_t sync_enter_timer; /* 0x250 */
+ uint64_t async_enter_timer; /* 0x258 */
+ uint64_t exit_timer; /* 0x260 */
+ uint64_t last_update_timer; /* 0x268 */
+ uint64_t user_timer; /* 0x270 */
+ uint64_t system_timer; /* 0x278 */
+ uint64_t last_update_clock; /* 0x280 */
+ uint64_t steal_clock; /* 0x288 */
+ PSW return_mcck_psw; /* 0x290 */
+ uint8_t pad8[0xc00-0x2a0]; /* 0x2a0 */
+ /* System info area */
+ uint64_t save_area[16]; /* 0xc00 */
+ uint8_t pad9[0xd40-0xc80]; /* 0xc80 */
+ uint64_t kernel_stack; /* 0xd40 */
+ uint64_t thread_info; /* 0xd48 */
+ uint64_t async_stack; /* 0xd50 */
+ uint64_t kernel_asce; /* 0xd58 */
+ uint64_t user_asce; /* 0xd60 */
+ uint64_t panic_stack; /* 0xd68 */
+ uint64_t user_exec_asce; /* 0xd70 */
+ uint8_t pad10[0xdc0-0xd78]; /* 0xd78 */
+
+ /* SMP info area: defined by DJB */
+ uint64_t clock_comparator; /* 0xdc0 */
+ uint64_t ext_call_fast; /* 0xdc8 */
+ uint64_t percpu_offset; /* 0xdd0 */
+ uint64_t current_task; /* 0xdd8 */
+ uint32_t softirq_pending; /* 0xde0 */
+ uint32_t pad_0x0de4; /* 0xde4 */
+ uint64_t int_clock; /* 0xde8 */
+ uint8_t pad12[0xe00-0xdf0]; /* 0xdf0 */
+
+ /* 0xe00 is used as indicator for dump tools */
+ /* whether the kernel died with panic() or not */
+ uint32_t panic_magic; /* 0xe00 */
+
+ uint8_t pad13[0x11b8-0xe04]; /* 0xe04 */
+
+ /* 64 bit extparam used for pfault, diag 250 etc */
+ uint64_t ext_params2; /* 0x11B8 */
+
+ uint8_t pad14[0x1200-0x11C0]; /* 0x11C0 */
+
+ /* System info area */
+
+ uint64_t floating_pt_save_area[16]; /* 0x1200 */
+ uint64_t gpregs_save_area[16]; /* 0x1280 */
+ uint32_t st_status_fixed_logout[4]; /* 0x1300 */
+ uint8_t pad15[0x1318-0x1310]; /* 0x1310 */
+ uint32_t prefixreg_save_area; /* 0x1318 */
+ uint32_t fpt_creg_save_area; /* 0x131c */
+ uint8_t pad16[0x1324-0x1320]; /* 0x1320 */
+ uint32_t tod_progreg_save_area; /* 0x1324 */
+ uint32_t cpu_timer_save_area[2]; /* 0x1328 */
+ uint32_t clock_comp_save_area[2]; /* 0x1330 */
+ uint8_t pad17[0x1340-0x1338]; /* 0x1338 */
+ uint32_t access_regs_save_area[16]; /* 0x1340 */
+ uint64_t cregs_save_area[16]; /* 0x1380 */
+
+ /* align to the top of the prefix area */
+
+ uint8_t pad18[0x2000-0x1400]; /* 0x1400 */
+} __attribute__((packed)) LowCore;
+
+/* STSI */
+#define STSI_LEVEL_MASK 0x00000000f0000000ULL
+#define STSI_LEVEL_CURRENT 0x0000000000000000ULL
+#define STSI_LEVEL_1 0x0000000010000000ULL
+#define STSI_LEVEL_2 0x0000000020000000ULL
+#define STSI_LEVEL_3 0x0000000030000000ULL
+#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
+#define STSI_R0_SEL1_MASK 0x00000000000000ffULL
+#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
+#define STSI_R1_SEL2_MASK 0x000000000000ffffULL
+
+/* Basic Machine Configuration */
+struct sysib_111 {
+ uint32_t res1[8];
+ uint8_t manuf[16];
+ uint8_t type[4];
+ uint8_t res2[12];
+ uint8_t model[16];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint8_t res3[156];
+};
+
+/* Basic Machine CPU */
+struct sysib_121 {
+ uint32_t res1[80];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint8_t res2[2];
+ uint16_t cpu_addr;
+ uint8_t res3[152];
+};
+
+/* Basic Machine CPUs */
+struct sysib_122 {
+ uint8_t res1[32];
+ uint32_t capability;
+ uint16_t total_cpus;
+ uint16_t active_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint16_t adjustments[2026];
+};
+
+/* LPAR CPU */
+struct sysib_221 {
+ uint32_t res1[80];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint16_t cpu_id;
+ uint16_t cpu_addr;
+ uint8_t res3[152];
+};
+
+/* LPAR CPUs */
+struct sysib_222 {
+ uint32_t res1[32];
+ uint16_t lpar_num;
+ uint8_t res2;
+ uint8_t lcpuc;
+ uint16_t total_cpus;
+ uint16_t conf_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint8_t name[8];
+ uint32_t caf;
+ uint8_t res3[16];
+ uint16_t dedicated_cpus;
+ uint16_t shared_cpus;
+ uint8_t res4[180];
+};
+
+/* VM CPUs */
+struct sysib_322 {
+ uint8_t res1[31];
+ uint8_t count;
+ struct {
+ uint8_t res2[4];
+ uint16_t total_cpus;
+ uint16_t conf_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint8_t name[8];
+ uint32_t caf;
+ uint8_t cpi[16];
+ uint8_t res3[24];
+ } vm[8];
+ uint8_t res4[3552];
+};
+
+/* MMU defines */
+#define _ASCE_ORIGIN ~0xfffULL /* segment table origin */
+#define _ASCE_SUBSPACE 0x200 /* subspace group control */
+#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
+#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
+#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
+#define _ASCE_REAL_SPACE 0x20 /* real space control */
+#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
+#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
+#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
+#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
+#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
+#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
+
+#define _REGION_ENTRY_ORIGIN ~0xfffULL /* region/segment table origin */
+#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
+#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
+#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
+#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
+#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
+
+#define _SEGMENT_ENTRY_ORIGIN ~0x7ffULL /* segment table origin */
+#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+
+#define _PAGE_RO 0x200 /* HW read-only bit */
+#define _PAGE_INVALID 0x400 /* HW invalid bit */
+
+
+
+/* EBCDIC handling */
+static const uint8_t ebcdic2ascii[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
+ 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
+ 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
+ 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
+ 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
+ 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
+ 0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21,
+ 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
+ 0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E,
+ 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
+ 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
+ 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
+ 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
+ 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+ 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
+ 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
+ 0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
+ 0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07,
+ 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
+ 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
+ 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
+ 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07,
+};
+
+static const uint8_t ascii2ebcdic [] = {
+ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
+ 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
+ 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
+ 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
+ 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
+ 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+ 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
+ 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D,
+ 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
+ 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
+};
+
+static inline void ebcdic_put(uint8_t *p, const char *ascii, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ p[i] = ascii2ebcdic[(int)ascii[i]];
+ }
+}
+
+#define SIGP_SENSE 0x01
+#define SIGP_EXTERNAL_CALL 0x02
+#define SIGP_EMERGENCY 0x03
+#define SIGP_START 0x04
+#define SIGP_STOP 0x05
+#define SIGP_RESTART 0x06
+#define SIGP_STOP_STORE_STATUS 0x09
+#define SIGP_INITIAL_CPU_RESET 0x0b
+#define SIGP_CPU_RESET 0x0c
+#define SIGP_SET_PREFIX 0x0d
+#define SIGP_STORE_STATUS_ADDR 0x0e
+#define SIGP_SET_ARCH 0x12
+
+/* cpu status bits */
+#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
+#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
+#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
+#define SIGP_STAT_STOPPED 0x00000040UL
+#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
+#define SIGP_STAT_CHECK_STOP 0x00000010UL
+#define SIGP_STAT_INOPERATIVE 0x00000004UL
+#define SIGP_STAT_INVALID_ORDER 0x00000002UL
+#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
+
+void load_psw(CPUState *env, uint64_t mask, uint64_t addr);
+int mmu_translate(CPUState *env, target_ulong vaddr, int rw, uint64_t asc,
+ target_ulong *raddr, int *flags);
+int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code);
+uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+ uint64_t vr);
+
+#define TARGET_HAS_ICE 1
+
+/* The value of the TOD clock for 1.1.1970. */
+#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+
+/* Converts ns to s390's clock format */
+static inline uint64_t time2tod(uint64_t ns) {
+ return (ns << 9) / 125;
+}
+
+static inline void cpu_inject_ext(CPUState *env, uint32_t code, uint32_t param,
+ uint64_t param64)
+{
+ if (env->ext_index == MAX_EXT_QUEUE - 1) {
+ /* ugh - can't queue anymore. Let's drop. */
+ return;
+ }
+
+ env->ext_index++;
+ assert(env->ext_index < MAX_EXT_QUEUE);
+
+ env->ext_queue[env->ext_index].code = code;
+ env->ext_queue[env->ext_index].param = param;
+ env->ext_queue[env->ext_index].param64 = param64;
+
+ env->pending_int |= INTERRUPT_EXT;
+ cpu_interrupt(env, CPU_INTERRUPT_HARD);
+}
#endif
--
1.6.0.2
^ permalink raw reply related [flat|nested] 29+ messages in thread
* [Qemu-devel] [PATCH 16/19] s390x: Implement opcode helpers
2011-03-29 13:29 [Qemu-devel] [PATCH 00/19] s390x emulation support Alexander Graf
` (14 preceding siblings ...)
2011-03-29 13:29 ` [Qemu-devel] [PATCH 15/19] s390x: helper functions for system emulation Alexander Graf
@ 2011-03-29 13:29 ` Alexander Graf
2011-03-29 16:13 ` [Qemu-devel] " Richard Henderson
2011-03-29 13:29 ` [Qemu-devel] [PATCH 17/19] s390x: Adjust internal kvm code Alexander Graf
` (4 subsequent siblings)
20 siblings, 1 reply; 29+ messages in thread
From: Alexander Graf @ 2011-03-29 13:29 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, aurelien, rth
There are some instructions that can't (or shouldn't) be expressed by pure
tcg code. For those, we call into externally compiled C functions.
This patch implements those C functions.
Signed-off-by: Alexander Graf <agraf@suse.de>
---
v1 -> v2:
- new clock syntax
- use float_chs
- use float compare built-ins
- remove redundant EXECUTE_SVC
---
target-s390x/helpers.h | 151 +++
target-s390x/op_helper.c | 2880 +++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 3029 insertions(+), 2 deletions(-)
create mode 100644 target-s390x/helpers.h
diff --git a/target-s390x/helpers.h b/target-s390x/helpers.h
new file mode 100644
index 0000000..6ca48eb
--- /dev/null
+++ b/target-s390x/helpers.h
@@ -0,0 +1,151 @@
+#include "def-helper.h"
+
+/* TCG helper declarations: DEF_HELPER_n(name, ret, arg-types...).
+ The _FLAGS variants mark helpers without side effects
+ (TCG_CALL_PURE/TCG_CALL_CONST) so TCG can optimize around them. */
+DEF_HELPER_1(exception, void, i32)
+DEF_HELPER_3(nc, i32, i32, i64, i64)
+DEF_HELPER_3(oc, i32, i32, i64, i64)
+DEF_HELPER_3(xc, i32, i32, i64, i64)
+DEF_HELPER_3(mvc, void, i32, i64, i64)
+DEF_HELPER_3(clc, i32, i32, i64, i64)
+DEF_HELPER_2(mvcl, i32, i32, i32)
+DEF_HELPER_FLAGS_1(set_cc_comp_s32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32)
+DEF_HELPER_FLAGS_1(set_cc_comp_s64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64)
+DEF_HELPER_FLAGS_2(set_cc_icm, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32)
+DEF_HELPER_3(clm, i32, i32, i32, i64)
+DEF_HELPER_3(stcm, void, i32, i32, i64)
+DEF_HELPER_2(mlg, void, i32, i64)
+DEF_HELPER_2(dlg, void, i32, i64)
+DEF_HELPER_FLAGS_3(set_cc_add64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64)
+DEF_HELPER_FLAGS_3(set_cc_addu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_3(set_cc_add32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32)
+DEF_HELPER_FLAGS_3(set_cc_addu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(set_cc_sub64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64)
+DEF_HELPER_FLAGS_3(set_cc_subu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_3(set_cc_sub32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32)
+DEF_HELPER_FLAGS_3(set_cc_subu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
+DEF_HELPER_3(srst, i32, i32, i32, i32)
+DEF_HELPER_3(clst, i32, i32, i32, i32)
+DEF_HELPER_3(mvpg, void, i64, i64, i64)
+DEF_HELPER_3(mvst, void, i32, i32, i32)
+DEF_HELPER_3(csg, i32, i32, i64, i32)
+DEF_HELPER_3(cdsg, i32, i32, i64, i32)
+DEF_HELPER_3(cs, i32, i32, i64, i32)
+DEF_HELPER_4(ex, i32, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_1(abs_i32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32)
+DEF_HELPER_FLAGS_1(nabs_i32, TCG_CALL_PURE|TCG_CALL_CONST, s32, s32)
+DEF_HELPER_FLAGS_1(abs_i64, TCG_CALL_PURE|TCG_CALL_CONST, i64, s64)
+DEF_HELPER_FLAGS_1(nabs_i64, TCG_CALL_PURE|TCG_CALL_CONST, s64, s64)
+DEF_HELPER_3(stcmh, void, i32, i64, i32)
+DEF_HELPER_3(icmh, i32, i32, i64, i32)
+DEF_HELPER_2(ipm, void, i32, i32)
+DEF_HELPER_FLAGS_3(addc_u32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(set_cc_addc_u64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
+DEF_HELPER_3(stam, void, i32, i64, i32)
+DEF_HELPER_3(lam, void, i32, i64, i32)
+DEF_HELPER_3(mvcle, i32, i32, i64, i32)
+DEF_HELPER_3(clcle, i32, i32, i64, i32)
+DEF_HELPER_3(slb, i32, i32, i32, i32)
+DEF_HELPER_4(slbg, i32, i32, i32, i64, i64)
+DEF_HELPER_2(cefbr, void, i32, s32)
+DEF_HELPER_2(cdfbr, void, i32, s32)
+DEF_HELPER_2(cxfbr, void, i32, s32)
+DEF_HELPER_2(cegbr, void, i32, s64)
+DEF_HELPER_2(cdgbr, void, i32, s64)
+DEF_HELPER_2(cxgbr, void, i32, s64)
+DEF_HELPER_2(adbr, i32, i32, i32)
+DEF_HELPER_2(aebr, i32, i32, i32)
+DEF_HELPER_2(sebr, i32, i32, i32)
+DEF_HELPER_2(sdbr, i32, i32, i32)
+DEF_HELPER_2(debr, void, i32, i32)
+DEF_HELPER_2(dxbr, void, i32, i32)
+DEF_HELPER_2(mdbr, void, i32, i32)
+DEF_HELPER_2(mxbr, void, i32, i32)
+DEF_HELPER_2(ldebr, void, i32, i32)
+DEF_HELPER_2(ldxbr, void, i32, i32)
+DEF_HELPER_2(lxdbr, void, i32, i32)
+DEF_HELPER_2(ledbr, void, i32, i32)
+DEF_HELPER_2(lexbr, void, i32, i32)
+DEF_HELPER_2(lpebr, i32, i32, i32)
+DEF_HELPER_2(lpdbr, i32, i32, i32)
+DEF_HELPER_2(lpxbr, i32, i32, i32)
+DEF_HELPER_2(ltebr, i32, i32, i32)
+DEF_HELPER_2(ltdbr, i32, i32, i32)
+DEF_HELPER_2(ltxbr, i32, i32, i32)
+DEF_HELPER_2(lcebr, i32, i32, i32)
+DEF_HELPER_2(lcdbr, i32, i32, i32)
+DEF_HELPER_2(lcxbr, i32, i32, i32)
+DEF_HELPER_2(aeb, void, i32, i32)
+DEF_HELPER_2(deb, void, i32, i32)
+DEF_HELPER_2(meeb, void, i32, i32)
+DEF_HELPER_2(cdb, i32, i32, i64)
+DEF_HELPER_2(adb, i32, i32, i64)
+DEF_HELPER_2(seb, void, i32, i32)
+DEF_HELPER_2(sdb, i32, i32, i64)
+DEF_HELPER_2(mdb, void, i32, i64)
+DEF_HELPER_2(ddb, void, i32, i64)
+DEF_HELPER_FLAGS_2(cebr, TCG_CALL_PURE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(cdbr, TCG_CALL_PURE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(cxbr, TCG_CALL_PURE, i32, i32, i32)
+DEF_HELPER_3(cgebr, i32, i32, i32, i32)
+DEF_HELPER_3(cgdbr, i32, i32, i32, i32)
+DEF_HELPER_3(cgxbr, i32, i32, i32, i32)
+DEF_HELPER_1(lzer, void, i32)
+DEF_HELPER_1(lzdr, void, i32)
+DEF_HELPER_1(lzxr, void, i32)
+DEF_HELPER_3(cfebr, i32, i32, i32, i32)
+DEF_HELPER_3(cfdbr, i32, i32, i32, i32)
+DEF_HELPER_3(cfxbr, i32, i32, i32, i32)
+DEF_HELPER_2(axbr, i32, i32, i32)
+DEF_HELPER_2(sxbr, i32, i32, i32)
+DEF_HELPER_2(meebr, void, i32, i32)
+DEF_HELPER_2(ddbr, void, i32, i32)
+DEF_HELPER_3(madb, void, i32, i64, i32)
+DEF_HELPER_3(maebr, void, i32, i32, i32)
+DEF_HELPER_3(madbr, void, i32, i32, i32)
+DEF_HELPER_3(msdbr, void, i32, i32, i32)
+DEF_HELPER_2(lxdb, void, i32, i64)
+DEF_HELPER_FLAGS_2(tceb, TCG_CALL_PURE, i32, i32, i64)
+DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_PURE, i32, i32, i64)
+DEF_HELPER_FLAGS_2(tcxb, TCG_CALL_PURE, i32, i32, i64)
+DEF_HELPER_2(flogr, i32, i32, i64)
+DEF_HELPER_2(sqdbr, void, i32, i32)
+DEF_HELPER_FLAGS_1(cvd, TCG_CALL_PURE|TCG_CALL_CONST, i64, s32)
+DEF_HELPER_3(unpk, void, i32, i64, i64)
+DEF_HELPER_3(tr, void, i32, i64, i64)
+
+/* helpers for privileged / system instructions (clock, MMU, SIGP...) */
+DEF_HELPER_2(servc, i32, i32, i64)
+DEF_HELPER_3(diag, i64, i32, i64, i64)
+DEF_HELPER_2(load_psw, void, i64, i64)
+DEF_HELPER_1(program_interrupt, void, i32)
+DEF_HELPER_FLAGS_1(stidp, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(spx, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(sck, TCG_CALL_CONST, i32, i64)
+DEF_HELPER_1(stck, i32, i64)
+DEF_HELPER_1(stcke, i32, i64)
+DEF_HELPER_FLAGS_1(sckc, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(stckc, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(spt, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(stpt, TCG_CALL_CONST, void, i64)
+DEF_HELPER_3(stsi, i32, i64, i32, i32)
+DEF_HELPER_3(lctl, void, i32, i64, i32)
+DEF_HELPER_3(lctlg, void, i32, i64, i32)
+DEF_HELPER_3(stctl, void, i32, i64, i32)
+DEF_HELPER_3(stctg, void, i32, i64, i32)
+DEF_HELPER_FLAGS_2(tprot, TCG_CALL_CONST, i32, i64, i64)
+DEF_HELPER_FLAGS_1(iske, TCG_CALL_PURE|TCG_CALL_CONST, i64, i64)
+DEF_HELPER_FLAGS_2(sske, TCG_CALL_CONST, void, i32, i64)
+DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_CONST, i32, i32, i64)
+DEF_HELPER_2(csp, i32, i32, i32)
+DEF_HELPER_3(mvcs, i32, i64, i64, i64)
+DEF_HELPER_3(mvcp, i32, i64, i64, i64)
+DEF_HELPER_3(sigp, i32, i64, i32, i64)
+DEF_HELPER_1(sacf, void, i64)
+DEF_HELPER_FLAGS_2(ipte, TCG_CALL_CONST, void, i64, i64)
+DEF_HELPER_FLAGS_0(ptlb, TCG_CALL_CONST, void)
+DEF_HELPER_2(lra, i32, i64, i32)
+DEF_HELPER_2(stura, void, i64, i32)
+DEF_HELPER_2(cksm, void, i32, i32)
+
+DEF_HELPER_FLAGS_4(calc_cc, TCG_CALL_PURE|TCG_CALL_CONST,
+ i32, i32, i64, i64, i64)
+
+/* re-included by convention; presumably def-helper.h undefines its own
+ macros on the second pass -- see that header */
+#include "def-helper.h"
diff --git a/target-s390x/op_helper.c b/target-s390x/op_helper.c
index 402df2d..d0b7353 100644
--- a/target-s390x/op_helper.c
+++ b/target-s390x/op_helper.c
@@ -1,6 +1,7 @@
/*
* S/390 helper routines
*
+ * Copyright (c) 2009 Ulrich Hecht
* Copyright (c) 2009 Alexander Graf
*
* This library is free software; you can redistribute it and/or
@@ -18,6 +19,11 @@
*/
#include "exec.h"
+#include "host-utils.h"
+#include "helpers.h"
+#include <string.h>
+#include "kvm.h"
+#include "qemu-timer.h"
/*****************************************************************************/
/* Softmmu support */
@@ -64,10 +70,2880 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
cpu_restore_state(tb, env, pc, NULL);
}
}
- /* XXX */
- /* helper_raise_exception_err(env->exception_index, env->error_code); */
+ cpu_loop_exit();
}
env = saved_env;
}
#endif
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* raise an exception; cpu_loop_exit() presumably longjmps back into the
+ cpu loop and does not return -- NOTE(review) */
+void HELPER(exception)(uint32_t excp)
+{
+ HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
+ env->exception_index = excp;
+ cpu_loop_exit();
+}
+
+#ifndef CONFIG_USER_ONLY
+/* memset() a guest-virtual range through a direct host mapping.
+ Callers ensure [dest, dest+l) does not cross a page boundary. */
+static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
+ uint8_t byte)
+{
+ target_phys_addr_t dest_phys;
+ target_phys_addr_t len = l;
+ void *dest_p;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ int flags;
+
+ if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
+ /* fault the page in (delivers the exception); never returns here */
+ stb(dest, byte);
+ cpu_abort(env, "should never reach here");
+ }
+ dest_phys |= dest & ~TARGET_PAGE_MASK;
+
+ dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
+
+ memset(dest_p, byte, len);
+
+ /* unmap takes (buffer, len, is_write, access_len) -- the old call
+ passed (buffer, 1, len, len), swapping len and is_write */
+ cpu_physical_memory_unmap(dest_p, len, 1, len);
+}
+
+/* memmove() between guest-virtual ranges through direct host mappings.
+ Callers ensure neither range crosses a page boundary. */
+static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ target_phys_addr_t dest_phys;
+ target_phys_addr_t src_phys;
+ target_phys_addr_t len = l;
+ void *dest_p;
+ void *src_p;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ int flags;
+
+ if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
+ /* fault the page in (delivers the exception); never returns here */
+ stb(dest, 0);
+ cpu_abort(env, "should never reach here");
+ }
+ dest_phys |= dest & ~TARGET_PAGE_MASK;
+
+ if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
+ ldub(src);
+ cpu_abort(env, "should never reach here");
+ }
+ src_phys |= src & ~TARGET_PAGE_MASK;
+
+ dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
+ src_p = cpu_physical_memory_map(src_phys, &len, 0);
+
+ memmove(dest_p, src_p, len);
+
+ /* unmap takes (buffer, len, is_write, access_len) -- the old calls
+ passed (buffer, is_write, len, len), swapping len and is_write */
+ cpu_physical_memory_unmap(dest_p, len, 1, len);
+ cpu_physical_memory_unmap(src_p, len, 0, len);
+}
+#endif
+
+/* bitwise AND of the l+1 bytes at src into dest (NC);
+ cc = 1 iff any result byte is non-zero */
+uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+ for (i = 0; i <= l; i++) {
+ x = ldub(dest + i) & ldub(src + i);
+ if (x) {
+ cc = 1;
+ }
+ stb(dest + i, x);
+ }
+ return cc;
+}
+
+/* bitwise XOR of the l+1 bytes at src into dest (XC);
+ cc = 1 iff any result byte is non-zero */
+uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+
+#ifndef CONFIG_USER_ONLY
+ /* xor with itself is the same as memset(0); fast path only when the
+ whole operand stays within one page */
+ if ((l > 32) && (src == dest) &&
+ (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
+ mvc_fast_memset(env, l + 1, dest, 0);
+ return 0;
+ }
+#else
+ if (src == dest) {
+ memset((void*)dest, 0, l + 1);
+ return 0;
+ }
+#endif
+
+ for (i = 0; i <= l; i++) {
+ x = ldub(dest + i) ^ ldub(src + i);
+ if (x) {
+ cc = 1;
+ }
+ stb(dest + i, x);
+ }
+ return cc;
+}
+
+/* bitwise OR of the l+1 bytes at src into dest (OC);
+ cc = 1 iff any result byte is non-zero */
+uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+ for (i = 0; i <= l; i++) {
+ x = ldub(dest + i) | ldub(src + i);
+ if (x) {
+ cc = 1;
+ }
+ stb(dest + i, x);
+ }
+ return cc;
+}
+
+/* move (MVC): copy l+1 bytes forward; the byte-wise forward copy makes
+ overlapping operands propagate bytes, so dest == src + 1 behaves as a
+ memset with the byte at src */
+void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i = 0;
+ int x = 0;
+ uint32_t l_64 = (l + 1) / 8;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+
+#ifndef CONFIG_USER_ONLY
+ /* fast paths only when both operands stay within one page */
+ if ((l > 32) &&
+ (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
+ (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
+ if (dest == (src + 1)) {
+ mvc_fast_memset(env, l + 1, dest, ldub(src));
+ return;
+ } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
+ mvc_fast_memmove(env, l + 1, dest, src);
+ return;
+ }
+ }
+#else
+ if (dest == (src + 1)) {
+ memset((void*)dest, ldub(src), l + 1);
+ return;
+ } else {
+ memmove((void*)dest, (void*)src, l + 1);
+ return;
+ }
+#endif
+
+ /* handle the parts that fit into 8-byte loads/stores */
+ if (dest != (src + 1)) {
+ for (i = 0; i < l_64; i++) {
+ stq(dest + x, ldq(src + x));
+ x += 8;
+ }
+ }
+
+ /* slow version crossing pages with byte accesses */
+ for (i = x; i <= l; i++) {
+ stb(dest + i, ldub(src + i));
+ }
+}
+
+/* compare unsigned byte arrays (CLC): cc = 0 equal, 1 first operand
+ low, 2 first operand high */
+uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
+{
+ int i;
+ unsigned char x,y;
+ uint32_t cc;
+ HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
+ __FUNCTION__, l, s1, s2);
+ for (i = 0; i <= l; i++) {
+ x = ldub(s1 + i);
+ y = ldub(s2 + i);
+ HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
+ if (x < y) {
+ cc = 1;
+ goto done;
+ } else if (x > y) {
+ cc = 2;
+ goto done;
+ }
+ }
+ cc = 0;
+done:
+ HELPER_LOG("\n");
+ return cc;
+}
+
+/* compare logical under mask: compare the bytes of r1's low word
+ selected by mask (top byte first) with successive bytes at addr */
+uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
+{
+ uint8_t r,d;
+ uint32_t cc;
+ HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
+ mask, addr);
+ cc = 0;
+ while (mask) {
+ if (mask & 8) {
+ d = ldub(addr);
+ r = (r1 & 0xff000000UL) >> 24;
+ HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
+ addr);
+ if (r < d) {
+ cc = 1;
+ break;
+ } else if (r > d) {
+ cc = 2;
+ break;
+ }
+ addr++;
+ }
+ mask = (mask << 1) & 0xf;
+ r1 <<= 8;
+ }
+ HELPER_LOG("\n");
+ return cc;
+}
+
+/* store character under mask: store the bytes of r1's low word selected
+ by mask (top byte first) to successive bytes at addr */
+void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
+{
+ uint8_t r;
+ HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
+ addr);
+ while (mask) {
+ if (mask & 8) {
+ r = (r1 & 0xff000000UL) >> 24;
+ stb(addr, r);
+ HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
+ addr++;
+ }
+ mask = (mask << 1) & 0xf;
+ r1 <<= 8;
+ }
+ HELPER_LOG("\n");
+}
+
+/* 64/64 -> 128 unsigned multiplication: regs[r1]:regs[r1+1] =
+ regs[r1+1] * v2 */
+void HELPER(mlg)(uint32_t r1, uint64_t v2)
+{
+#if HOST_LONG_BITS == 64 && defined(__GNUC__)
+ /* assuming 64-bit hosts have __uint128_t */
+ __uint128_t res = (__uint128_t)env->regs[r1 + 1];
+ res *= (__uint128_t)v2;
+ env->regs[r1] = (uint64_t)(res >> 64);
+ env->regs[r1 + 1] = (uint64_t)res;
+#else
+ mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
+#endif
+}
+
+/* 128 -> 64/64 unsigned division: regs[r1]:regs[r1+1] / v2, remainder
+ in regs[r1], quotient in regs[r1+1].
+ NOTE(review): v2 == 0 is not checked -- a host SIGFPE instead of a
+ guest fixed-point-divide exception; confirm this is handled elsewhere */
+void HELPER(dlg)(uint32_t r1, uint64_t v2)
+{
+ uint64_t divisor = v2;
+
+ if (!env->regs[r1]) {
+ /* 64 -> 64/64 case */
+ env->regs[r1] = env->regs[r1+1] % divisor;
+ env->regs[r1+1] = env->regs[r1+1] / divisor;
+ return;
+ } else {
+
+#if HOST_LONG_BITS == 64 && defined(__GNUC__)
+ /* assuming 64-bit hosts have __uint128_t */
+ __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
+ (env->regs[r1+1]);
+ __uint128_t quotient = dividend / divisor;
+ env->regs[r1+1] = quotient;
+ __uint128_t remainder = dividend % divisor;
+ env->regs[r1] = remainder;
+#else
+ /* 32-bit hosts would need special wrapper functionality - just abort if
+ we encounter such a case; it's very unlikely anyways. */
+ cpu_abort(env, "128 -> 64/64 division not implemented\n");
+#endif
+ }
+}
+
+/* compute an effective address from base/index registers plus
+ displacement, truncated to 31 bits outside 64-bit mode */
+static inline uint64_t get_address(int x2, int b2, int d2)
+{
+ uint64_t r = d2;
+
+ if (x2) {
+ r += env->regs[x2];
+ }
+
+ if (b2) {
+ r += env->regs[b2];
+ }
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ r &= 0x7fffffff;
+ }
+
+ return r;
+}
+
+/* read a register as an address, truncated to 31 bits outside
+ 64-bit mode */
+static inline uint64_t get_address_31fix(int reg)
+{
+ uint64_t r = env->regs[reg];
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ r &= 0x7fffffff;
+ }
+
+ return r;
+}
+
+/* search string (c is byte to search, r2 is string, r1 end of string);
+ cc = 1 and r1 = match address when found, cc = 2 (r1 untouched) when
+ the end address is reached without a match */
+uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
+{
+ uint64_t i;
+ uint32_t cc = 2;
+ uint64_t str = get_address_31fix(r2);
+ uint64_t end = get_address_31fix(r1);
+
+ HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
+ c, env->regs[r1], env->regs[r2]);
+
+ for (i = str; i != end; i++) {
+ if (ldub(i) == c) {
+ env->regs[r1] = i;
+ cc = 1;
+ break;
+ }
+ }
+
+ return cc;
+}
+
+/* unsigned string compare (CLST; c is the string terminator byte):
+ cc = 0 when both strings terminate together; otherwise cc = 1/2 for
+ first operand low/high and r1/r2 are updated to the mismatch. */
+uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
+{
+ uint64_t s1 = get_address_31fix(r1);
+ uint64_t s2 = get_address_31fix(r2);
+ uint8_t v1, v2;
+ uint32_t cc;
+
+ /* only the low byte of c is the terminator */
+ c = c & 0xff;
+#ifdef CONFIG_USER_ONLY
+ if (!c) {
+ HELPER_LOG("%s: comparing '%s' and '%s'\n",
+ __FUNCTION__, (char*)s1, (char*)s2);
+ }
+#endif
+ /* scan until a terminator or the first mismatching byte */
+ for (;;) {
+ v1 = ldub(s1);
+ v2 = ldub(s2);
+ if ((v1 == c || v2 == c) || (v1 != v2)) {
+ break;
+ }
+ s1++;
+ s2++;
+ }
+
+ if (v1 == v2) {
+ cc = 0;
+ } else {
+ cc = (v1 < v2) ? 1 : 2;
+ /* FIXME: 31-bit mode! */
+ env->regs[r1] = s1;
+ env->regs[r2] = s2;
+ }
+ return cc;
+}
+
+/* move page (MVPG): copy one page from r2 to r1 */
+void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
+{
+ /* XXX missing r0 handling (r0 carries the MVPG option bits) */
+#ifdef CONFIG_USER_ONLY
+ int i;
+
+ for (i = 0; i < TARGET_PAGE_SIZE; i++) {
+ stb(r1 + i, ldub(r2 + i));
+ }
+#else
+ mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
+#endif
+}
+
+/* string copy (MVST; c is the string terminator byte): copy up to and
+ including the terminator, then point r1 at the stored terminator */
+void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
+{
+ uint64_t dest = get_address_31fix(r1);
+ uint64_t src = get_address_31fix(r2);
+ uint8_t v;
+
+ /* only the low byte of c is the terminator */
+ c = c & 0xff;
+#ifdef CONFIG_USER_ONLY
+ if (!c) {
+ HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)src, dest);
+ }
+#endif
+ for (;;) {
+ v = ldub(src);
+ stb(dest, v);
+ if (v == c) {
+ break;
+ }
+ src++;
+ dest++;
+ }
+ env->regs[r1] = dest; /* FIXME: 31-bit mode! */
+}
+
+/* compare and swap 64-bit (CSG): store r3 at a2 if r1 matches the
+ memory operand (cc 0), else load the memory operand into r1 (cc 1) */
+uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ /* FIXME: locking? */
+ uint32_t cc;
+ uint64_t v2 = ldq(a2);
+ if (env->regs[r1] == v2) {
+ cc = 0;
+ stq(a2, env->regs[r3]);
+ } else {
+ cc = 1;
+ env->regs[r1] = v2;
+ }
+ return cc;
+}
+
+/* compare double and swap 64-bit (CDSG): as csg, but on the 128-bit
+ register pairs r1/r1+1 and r3/r3+1 */
+uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ /* FIXME: locking? */
+ uint32_t cc;
+ uint64_t v2_hi = ldq(a2);
+ uint64_t v2_lo = ldq(a2 + 8);
+ uint64_t v1_hi = env->regs[r1];
+ uint64_t v1_lo = env->regs[r1 + 1];
+
+ if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
+ cc = 0;
+ stq(a2, env->regs[r3]);
+ stq(a2 + 8, env->regs[r3 + 1]);
+ } else {
+ cc = 1;
+ env->regs[r1] = v2_hi;
+ env->regs[r1 + 1] = v2_lo;
+ }
+
+ return cc;
+}
+
+/* compare and swap 32-bit (CS): only the low word of r1 is compared
+ and replaced; the upper word is preserved */
+uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ /* FIXME: locking? */
+ uint32_t cc;
+ HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
+ uint32_t v2 = ldl(a2);
+ if (((uint32_t)env->regs[r1]) == v2) {
+ cc = 0;
+ stl(a2, (uint32_t)env->regs[r3]);
+ } else {
+ cc = 1;
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
+ }
+ return cc;
+}
+
+/* insert characters under mask: load mask-selected bytes into r1's low
+ word (top byte first). cc = 1 if the first loaded byte has its top
+ bit set, else 2 if any loaded byte is non-zero, else 0. */
+static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
+{
+ int pos = 24; /* top of the lower half of r1 */
+ uint64_t rmask = 0xff000000ULL;
+ uint8_t val = 0;
+ int ccd = 0;
+ uint32_t cc = 0;
+
+ while (mask) {
+ if (mask & 8) {
+ env->regs[r1] &= ~rmask;
+ val = ldub(address);
+ /* ccd: only the first loaded byte's sign bit can set cc = 1 */
+ if ((val & 0x80) && !ccd) {
+ cc = 1;
+ }
+ ccd = 1;
+ if (val && cc == 0) {
+ cc = 2;
+ }
+ env->regs[r1] |= (uint64_t)val << pos;
+ address++;
+ }
+ mask = (mask << 1) & 0xf;
+ pos -= 8;
+ rmask >>= 8;
+ }
+
+ return cc;
+}
+
+/* execute instruction
+ this instruction executes an insn modified with the contents of r1
+ it does not change the executed instruction in memory
+ it does not change the program counter
+ in other words: tricky...
+ currently implemented by interpreting the cases it is most commonly used in
+ */
+uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
+{
+ uint16_t insn = lduw_code(addr);
+ HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
+ insn);
+ if ((insn & 0xf0ff) == 0xd000) {
+ /* SS-format storage ops: the length byte comes from v1 (r1) */
+ uint32_t l, insn2, b1, b2, d1, d2;
+ l = v1 & 0xff;
+ insn2 = ldl_code(addr + 2);
+ b1 = (insn2 >> 28) & 0xf;
+ b2 = (insn2 >> 12) & 0xf;
+ d1 = (insn2 >> 16) & 0xfff;
+ d2 = insn2 & 0xfff;
+ switch (insn & 0xf00) {
+ case 0x200:
+ helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
+ break;
+ case 0x500:
+ cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
+ break;
+ case 0x700:
+ cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
+ break;
+ default:
+ goto abort;
+ break;
+ }
+ } else if ((insn & 0xff00) == 0x0a00) {
+ /* supervisor call */
+ HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
+ env->psw.addr = ret - 4;
+ env->int_svc_code = (insn|v1) & 0xff;
+ env->int_svc_ilc = 4;
+ helper_exception(EXCP_SVC);
+ } else if ((insn & 0xff00) == 0xbf00) {
+ /* ICM: the mask field (r3) is ORed from v1 via the insn word */
+ uint32_t insn2, r1, r3, b2, d2;
+ insn2 = ldl_code(addr + 2);
+ r1 = (insn2 >> 20) & 0xf;
+ r3 = (insn2 >> 16) & 0xf;
+ b2 = (insn2 >> 12) & 0xf;
+ d2 = insn2 & 0xfff;
+ cc = helper_icm(r1, get_address(0, b2, d2), r3);
+ } else {
+abort:
+ cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
+ insn);
+ }
+ return cc;
+}
+
+/* absolute value 32-bit */
+uint32_t HELPER(abs_i32)(int32_t val)
+{
+ return (val < 0) ? -val : val;
+}
+
+/* negative absolute value 32-bit */
+int32_t HELPER(nabs_i32)(int32_t val)
+{
+ return (val < 0) ? val : -val;
+}
+
+/* absolute value 64-bit */
+uint64_t HELPER(abs_i64)(int64_t val)
+{
+ HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
+
+ return (val < 0) ? -val : val;
+}
+
+/* negative absolute value 64-bit */
+int64_t HELPER(nabs_i64)(int64_t val)
+{
+ return (val < 0) ? val : -val;
+}
+
+/* add with carry 32-bit unsigned: the carry-in is bit 1 of the
+ condition code */
+uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
+{
+ return v1 + v2 + ((cc >> 1) & 1);
+}
+
+/* store character under mask high: like stcm, but the mask-selected
+ bytes come from the upper half of r1 */
+void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
+{
+ int pos = 56; /* top of the upper half of r1 */
+
+ while (mask) {
+ if (mask & 8) {
+ stb(address, (env->regs[r1] >> pos) & 0xff);
+ address++;
+ }
+ mask = (mask << 1) & 0xf;
+ pos -= 8;
+ }
+}
+
+/* insert character under mask high; same as icm, but operates on the
+ upper half of r1. cc as for icm: 1 if first loaded byte has its top
+ bit set, 2 if any loaded byte non-zero, else 0. */
+uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
+{
+ int pos = 56; /* top of the upper half of r1 */
+ uint64_t rmask = 0xff00000000000000ULL;
+ uint8_t val = 0;
+ int ccd = 0;
+ uint32_t cc = 0;
+
+ while (mask) {
+ if (mask & 8) {
+ env->regs[r1] &= ~rmask;
+ val = ldub(address);
+ /* ccd: only the first loaded byte's sign bit can set cc = 1 */
+ if ((val & 0x80) && !ccd) {
+ cc = 1;
+ }
+ ccd = 1;
+ if (val && cc == 0) {
+ cc = 2;
+ }
+ env->regs[r1] |= (uint64_t)val << pos;
+ address++;
+ }
+ mask = (mask << 1) & 0xf;
+ pos -= 8;
+ rmask >>= 8;
+ }
+
+ return cc;
+}
+
+/* insert psw mask and condition code into r1 (IPM): byte at bits 24-31
+ of r1's low word becomes cc (bits 28-29) and program mask (bits
+ 24-27); the rest of r1 is unchanged */
+void HELPER(ipm)(uint32_t cc, uint32_t r1)
+{
+ uint64_t r = env->regs[r1];
+
+ r &= 0xffffffff00ffffffULL;
+ /* the program mask nibble (PSW mask bits 40-43) belongs in bits
+ 24-27; the old code ORed it into bits 0-3, corrupting the low
+ nibble of r1 */
+ r |= (cc << 28) | (((env->psw.mask >> 40) & 0xf) << 24);
+ env->regs[r1] = r;
+ HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
+ cc, env->psw.mask, r);
+}
+
+/* load access registers r1 to r3 from memory at a2; the register range
+ wraps modulo 16 */
+void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ env->aregs[i] = ldl(a2);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* store access registers r1 to r3 in memory at a2; the register range
+ wraps modulo 16 */
+void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ stl(a2, env->aregs[i]);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* move long (MVCL): copy with destination padding; 24-bit lengths in
+ r1+1/r2+1, cc compares the original lengths.
+ NOTE(review): pad is taken from bits of the 31-bit-masked source
+ address (src >> 24); verify against the architected location of the
+ padding byte in R2+1 */
+uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
+{
+ uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+ uint64_t dest = get_address_31fix(r1);
+ uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+ uint64_t src = get_address_31fix(r2);
+ uint8_t pad = src >> 24;
+ uint8_t v;
+ uint32_t cc;
+
+ if (destlen == srclen) {
+ cc = 0;
+ } else if (destlen < srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+ v = ldub(src);
+ stb(dest, v);
+ }
+
+ /* pad out the remaining destination bytes */
+ for (; destlen; dest++, destlen--) {
+ stb(dest, pad);
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ env->regs[r2 + 1] -= src - env->regs[r2];
+ env->regs[r1] = dest;
+ env->regs[r2] = src;
+
+ return cc;
+}
+
+/* move long extended another memcopy insn with more bells and whistles;
+ the pad byte comes from the low byte of the a2 operand */
+uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uint64_t destlen = env->regs[r1 + 1];
+ uint64_t dest = env->regs[r1];
+ uint64_t srclen = env->regs[r3 + 1];
+ uint64_t src = env->regs[r3];
+ uint8_t pad = a2 & 0xff;
+ uint8_t v;
+ uint32_t cc;
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ destlen = (uint32_t)destlen;
+ srclen = (uint32_t)srclen;
+ dest &= 0x7fffffff;
+ src &= 0x7fffffff;
+ }
+
+ if (destlen == srclen) {
+ cc = 0;
+ } else if (destlen < srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+ v = ldub(src);
+ stb(dest, v);
+ }
+
+ /* pad out the remaining destination bytes */
+ for (; destlen; dest++, destlen--) {
+ stb(dest, pad);
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ /* FIXME: 31-bit mode! */
+ env->regs[r3 + 1] -= src - env->regs[r3];
+ env->regs[r1] = dest;
+ env->regs[r3] = src;
+
+ return cc;
+}
+
+/* compare logical long extended memcompare insn with padding; the pad
+ byte (low byte of a2) stands in for the exhausted shorter operand */
+uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uint64_t destlen = env->regs[r1 + 1];
+ uint64_t dest = get_address_31fix(r1);
+ uint64_t srclen = env->regs[r3 + 1];
+ uint64_t src = get_address_31fix(r3);
+ uint8_t pad = a2 & 0xff;
+ uint8_t v1 = 0,v2 = 0;
+ uint32_t cc = 0;
+
+ if (!(destlen || srclen)) {
+ return cc;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen || srclen; src++, dest++) {
+ v1 = srclen ? ldub(src) : pad;
+ v2 = destlen ? ldub(dest) : pad;
+ if (v1 != v2) {
+ cc = (v1 < v2) ? 1 : 2;
+ break;
+ }
+ /* guard the decrements: the old loop decremented unconditionally,
+ so an exhausted length wrapped to 2^64-1 and the pad byte was
+ only used for a single iteration */
+ if (srclen) {
+ srclen--;
+ }
+ if (destlen) {
+ destlen--;
+ }
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ env->regs[r3 + 1] -= src - env->regs[r3];
+ env->regs[r1] = dest;
+ env->regs[r3] = src;
+
+ return cc;
+}
+
+/* subtract unsigned v2 from v1 with borrow (32-bit).
+ NOTE(review): the returned cc tests v1 (old first operand) rather
+ than the result res -- verify against the architected cc definition
+ (result zero/non-zero, borrow/no borrow) */
+uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
+{
+ uint32_t v1 = env->regs[r1];
+ uint32_t res = v1 + (~v2) + (cc >> 1);
+
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
+ if (cc & 2) {
+ /* borrow */
+ return v1 ? 1 : 0;
+ } else {
+ return v1 ? 3 : 2;
+ }
+}
+
+/* subtract unsigned v2 from v1 with borrow (64-bit).
+ NOTE(review): same cc concern as slb above */
+uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
+{
+ uint64_t res = v1 + (~v2) + (cc >> 1);
+
+ env->regs[r1] = res;
+ if (cc & 2) {
+ /* borrow */
+ return v1 ? 1 : 0;
+ } else {
+ return v1 ? 3 : 2;
+ }
+}
+
+/* map a softfloat comparison result to the s390 condition code
+ (0 equal, 1 less, 2 greater, 3 unordered) */
+static inline int float_comp_to_cc(int float_compare)
+{
+ switch (float_compare) {
+ case float_relation_equal:
+ return 0;
+ case float_relation_less:
+ return 1;
+ case float_relation_greater:
+ return 2;
+ case float_relation_unordered:
+ return 3;
+ default:
+ /* cpu_abort does not return */
+ cpu_abort(env, "unknown return value for float compare\n");
+ }
+}
+
+/* condition codes for binary FP ops */
+static uint32_t set_cc_f32(float32 v1, float32 v2)
+{
+ return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
+}
+
+static uint32_t set_cc_f64(float64 v1, float64 v2)
+{
+ return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
+}
+
+/* condition codes for unary FP ops: 0 zero, 1 negative, 2 positive,
+ 3 NaN */
+static uint32_t set_cc_nz_f32(float32 v)
+{
+ if (float32_is_any_nan(v)) {
+ return 3;
+ } else if (float32_is_zero(v)) {
+ return 0;
+ } else if (float32_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t set_cc_nz_f64(float64 v)
+{
+ if (float64_is_any_nan(v)) {
+ return 3;
+ } else if (float64_is_zero(v)) {
+ return 0;
+ } else if (float64_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t set_cc_nz_f128(float128 v)
+{
+ if (float128_is_any_nan(v)) {
+ return 3;
+ } else if (float128_is_zero(v)) {
+ return 0;
+ } else if (float128_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+/* convert 32-bit int to 64-bit float */
+void HELPER(cdfbr)(uint32_t f1, int32_t v2)
+{
+ HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
+ env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
+}
+
+/* convert 32-bit int to 128-bit float; the 128-bit value is stored
+ across the register pair f1 / f1+2 */
+void HELPER(cxfbr)(uint32_t f1, int32_t v2)
+{
+ CPU_QuadU v1;
+ v1.q = int32_to_float128(v2, &env->fpu_status);
+ env->fregs[f1].ll = v1.ll.upper;
+ env->fregs[f1 + 2].ll = v1.ll.lower;
+}
+
+/* convert 64-bit int to 32-bit float; 32-bit floats live in the upper
+ half of the register (l.upper) */
+void HELPER(cegbr)(uint32_t f1, int64_t v2)
+{
+ HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
+ env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
+}
+
+/* convert 64-bit int to 64-bit float */
+void HELPER(cdgbr)(uint32_t f1, int64_t v2)
+{
+ HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
+ env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
+}
+
+/* convert 64-bit int to 128-bit float (register pair f1 / f1+2) */
+void HELPER(cxgbr)(uint32_t f1, int64_t v2)
+{
+ CPU_QuadU x1;
+ x1.q = int64_to_float128(v2, &env->fpu_status);
+ HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
+ x1.ll.upper, x1.ll.lower);
+ env->fregs[f1].ll = x1.ll.upper;
+ env->fregs[f1 + 2].ll = x1.ll.lower;
+}
+
+/* convert 32-bit int to 32-bit float */
+void HELPER(cefbr)(uint32_t f1, int32_t v2)
+{
+ env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
+ HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
+ env->fregs[f1].l.upper, f1);
+}
+
+/* 32-bit FP addition RR; returns the cc for the result */
+uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+ HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
+ env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
+
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* 64-bit FP addition RR; returns the cc for the result */
+uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+ HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
+ env->fregs[f2].d, env->fregs[f1].d, f1);
+
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* 32-bit FP subtraction RR; returns the cc for the result */
+uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+ /* the old log message said "adding" -- this is a subtraction */
+ HELPER_LOG("%s: subtracting 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
+ env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
+
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* 64-bit FP subtraction RR; returns the cc for the result */
+uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+ HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
+ __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
+
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* 32-bit FP division RR */
+void HELPER(debr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+}
+
+/* 128-bit FP division RR; 128-bit operands live in the register pairs
+ f / f+2 */
+void HELPER(dxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_div(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+}
+
+/* 64-bit FP multiplication RR */
+void HELPER(mdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+}
+
+/* 128-bit FP multiplication RR (register pairs f / f+2) */
+void HELPER(mxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+}
+
+/* convert 32-bit float to 64-bit float */
+void HELPER(ldebr)(uint32_t r1, uint32_t r2)
+{
+ env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
+ &env->fpu_status);
+}
+
+/* convert 128-bit float to 64-bit float */
+void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x2;
+ x2.ll.upper = env->fregs[f2].ll;
+ x2.ll.lower = env->fregs[f2 + 2].ll;
+ env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
+ HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
+}
+
+/* convert 64-bit float to 128-bit float (register pair f1 / f1+2) */
+void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU res;
+ res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+}
+
+/* convert 64-bit float to 32-bit float */
+void HELPER(ledbr)(uint32_t f1, uint32_t f2)
+{
+ float64 d2 = env->fregs[f2].d;
+ env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
+}
+
+/* convert 128-bit float to 32-bit float */
+void HELPER(lexbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x2;
+ x2.ll.upper = env->fregs[f2].ll;
+ x2.ll.lower = env->fregs[f2 + 2].ll;
+ env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
+ HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
+}
+
+/* absolute value of 32-bit float */
+uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
+{
+ float32 v1;
+ float32 v2 = env->fregs[f2].d;
+ v1 = float32_abs(v2);
+ env->fregs[f1].d = v1;
+ return set_cc_nz_f32(v1);
+}
+
+/* absolute value of 64-bit float */
+uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
+{
+ float64 v1;
+ float64 v2 = env->fregs[f2].d;
+ v1 = float64_abs(v2);
+ env->fregs[f1].d = v1;
+ return set_cc_nz_f64(v1);
+}
+
+/* absolute value of 128-bit float */
+uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ v1.q = float128_abs(v2.q);
+ env->fregs[f1].ll = v1.ll.upper;
+ env->fregs[f1 + 2].ll = v1.ll.lower;
+ return set_cc_nz_f128(v1.q);
+}
+
+/* load and test 64-bit float */
+uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = env->fregs[f2].d;
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* load and test 32-bit float */
+uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = env->fregs[f2].l.upper;
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* load and test 128-bit float */
+uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x;
+ x.ll.upper = env->fregs[f2].ll;
+ x.ll.lower = env->fregs[f2 + 2].ll;
+ env->fregs[f1].ll = x.ll.upper;
+ env->fregs[f1 + 2].ll = x.ll.lower;
+ return set_cc_nz_f128(x.q);
+}
+
+/* load complement of 32-bit float */
+uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
+
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* load complement of 64-bit float */
+uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_chs(env->fregs[f2].d);
+
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* load complement of 128-bit float */
+uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x1, x2;
+ x2.ll.upper = env->fregs[f2].ll;
+ x2.ll.lower = env->fregs[f2 + 2].ll;
+ x1.q = float128_chs(x2.q);
+ env->fregs[f1].ll = x1.ll.upper;
+ env->fregs[f1 + 2].ll = x1.ll.lower;
+ return set_cc_nz_f128(x1.q);
+}
+
+/* 32-bit FP addition RM */
+void HELPER(aeb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
+ v1, f1, v2.f);
+ env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
+}
+
+/* 32-bit FP division RM */
+void HELPER(deb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
+ v1, f1, v2.f);
+ env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
+}
+
+/* 32-bit FP multiplication RM */
+void HELPER(meeb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
+ v1, f1, v2.f);
+ env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
+}
+
+/* 32-bit FP compare RR */
+uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ float32 v2 = env->fregs[f2].l.upper;;
+ HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
+ v1, f1, v2);
+ return set_cc_f32(v1, v2);
+}
+
+/* 64-bit FP compare RR */
+uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
+{
+ float64 v1 = env->fregs[f1].d;
+ float64 v2 = env->fregs[f2].d;;
+ HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
+ v1, f1, v2);
+ return set_cc_f64(v1, v2);
+}
+
+/* 128-bit FP compare RR */
+uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+
+ return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
+ &env->fpu_status));
+}
+
+/* 64-bit FP compare RM */
+uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
+ f1, v2.d);
+ return set_cc_f64(v1, v2.d);
+}
+
+/* 64-bit FP addition RM */
+uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
+ v1, f1, v2.d);
+ env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
+ return set_cc_nz_f64(v1);
+}
+
+/* 32-bit FP subtraction RM */
+void HELPER(seb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
+}
+
+/* 64-bit FP subtraction RM */
+uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
+ return set_cc_nz_f64(v1);
+}
+
+/* 64-bit FP multiplication RM */
+void HELPER(mdb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
+ v1, f1, v2.d);
+ env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
+}
+
+/* 64-bit FP division RM */
+void HELPER(ddb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
+ v1, f1, v2.d);
+ env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
+}
+
/* Apply the BFP rounding-mode modifier m3 to the softfloat status.
   m3 == 0 keeps the currently active mode.
   NOTE(review): modifier values 2 and 3 fall out of the switch and also
   keep the current mode -- confirm whether they should instead raise a
   specification exception. */
static void set_round_mode(int m3)
{
    switch (m3) {
    case 0:
        /* current mode */
        break;
    case 1:
        /* biased round no nearest */
        /* fallthrough: treated the same as round to nearest */
    case 4:
        /* round to nearest */
        set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
        break;
    case 5:
        /* round to zero */
        set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
        break;
    case 6:
        /* round to +inf */
        set_float_rounding_mode(float_round_up, &env->fpu_status);
        break;
    case 7:
        /* round to -inf */
        set_float_rounding_mode(float_round_down, &env->fpu_status);
        break;
    }
}
+
+/* convert 32-bit float to 64-bit int */
+uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float32 v2 = env->fregs[f2].l.upper;
+ set_round_mode(m3);
+ env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
+ return set_cc_nz_f32(v2);
+}
+
+/* convert 64-bit float to 64-bit int */
+uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float64 v2 = env->fregs[f2].d;
+ set_round_mode(m3);
+ env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
+ return set_cc_nz_f64(v2);
+}
+
+/* convert 128-bit float to 64-bit int */
+uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ set_round_mode(m3);
+ env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
+ if (float128_is_any_nan(v2.q)) {
+ return 3;
+ } else if (float128_is_zero(v2.q)) {
+ return 0;
+ } else if (float128_is_neg(v2.q)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+/* convert 32-bit float to 32-bit int */
+uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float32 v2 = env->fregs[f2].l.upper;
+ set_round_mode(m3);
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
+ float32_to_int32(v2, &env->fpu_status);
+ return set_cc_nz_f32(v2);
+}
+
+/* convert 64-bit float to 32-bit int */
+uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float64 v2 = env->fregs[f2].d;
+ set_round_mode(m3);
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
+ float64_to_int32(v2, &env->fpu_status);
+ return set_cc_nz_f64(v2);
+}
+
+/* convert 128-bit float to 32-bit int */
+uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
+ float128_to_int32(v2.q, &env->fpu_status);
+ return set_cc_nz_f128(v2.q);
+}
+
/* load 32-bit FP zero */
void HELPER(lzer)(uint32_t f1)
{
    /* short floats occupy only the high word of the register */
    env->fregs[f1].l.upper = float32_zero;
}
+
/* load 64-bit FP zero */
void HELPER(lzdr)(uint32_t f1)
{
    /* overwrite the whole 64-bit register with +0.0 */
    env->fregs[f1].d = float64_zero;
}
+
+/* load 128-bit FP zero */
+void HELPER(lzxr)(uint32_t f1)
+{
+ CPU_QuadU x;
+ x.q = float64_to_float128(float64_zero, &env->fpu_status);
+ env->fregs[f1].ll = x.ll.upper;
+ env->fregs[f1 + 1].ll = x.ll.lower;
+}
+
+/* 128-bit FP subtraction RR */
+uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+ return set_cc_nz_f128(res.q);
+}
+
+/* 128-bit FP addition RR */
+uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_add(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+ return set_cc_nz_f128(res.q);
+}
+
+/* 32-bit FP multiplication RR */
+void HELPER(meebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+}
+
+/* 64-bit FP division RR */
+void HELPER(ddbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+}
+
+/* 64-bit FP multiply and add RM */
+void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
+{
+ HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ env->fregs[f1].d = float64_add(env->fregs[f1].d,
+ float64_mul(v2.d, env->fregs[f3].d,
+ &env->fpu_status),
+ &env->fpu_status);
+}
+
+/* 64-bit FP multiply and add RR */
+void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
+{
+ HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
+ env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
+ env->fregs[f3].d,
+ &env->fpu_status),
+ env->fregs[f1].d, &env->fpu_status);
+}
+
+/* 64-bit FP multiply and subtract RR */
+void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
+{
+ HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
+ env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
+ env->fregs[f3].d,
+ &env->fpu_status),
+ env->fregs[f1].d, &env->fpu_status);
+}
+
+/* 32-bit FP multiply and add RR */
+void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
+ float32_mul(env->fregs[f2].l.upper,
+ env->fregs[f3].l.upper,
+ &env->fpu_status),
+ &env->fpu_status);
+}
+
+/* convert 64-bit float to 128-bit float */
+void HELPER(lxdb)(uint32_t f1, uint64_t a2)
+{
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ CPU_QuadU v1;
+ v1.q = float64_to_float128(v2.d, &env->fpu_status);
+ env->fregs[f1].ll = v1.ll.upper;
+ env->fregs[f1 + 2].ll = v1.ll.lower;
+}
+
/* test data class 32-bit */
uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
{
    float32 v1 = env->fregs[f1].l.upper;
    int neg = float32_is_neg(v1);
    uint32_t cc = 0;

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
    /* m2 holds (+,-) class-bit pairs: zero at bits 11/10, infinity at
       5/4, quiet NaN at 3/2, signaling NaN at 1/0; 'neg' selects the
       second bit of each pair.  cc 1 = operand matches a selected
       class, cc 0 = no match. */
    if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }

    /* FIXME: denormalized? */
    return cc;
}
+
/* test data class 64-bit */
uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
{
    float64 v1 = env->fregs[f1].d;
    int neg = float64_is_neg(v1);
    uint32_t cc = 0;

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
    /* same class-bit layout as tceb: (+,-) pairs at bits 11/10 (zero),
       5/4 (infinity), 3/2 (QNaN), 1/0 (SNaN); 'neg' picks the minus
       bit of each pair */
    if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }
    /* FIXME: denormalized? */
    return cc;
}
+
/* test data class 128-bit */
uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
{
    CPU_QuadU v1;
    uint32_t cc = 0;
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;

    /* same class-bit layout as tceb/tcdb */
    int neg = float128_is_neg(v1.q);
    if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
        (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
        (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
        (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }
    /* FIXME: denormalized? */
    return cc;
}
+
+/* find leftmost one */
+uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
+{
+ uint64_t res = 0;
+ uint64_t ov2 = v2;
+
+ while (!(v2 & 0x8000000000000000ULL) && v2) {
+ v2 <<= 1;
+ res++;
+ }
+
+ if (!v2) {
+ env->regs[r1] = 64;
+ env->regs[r1 + 1] = 0;
+ return 0;
+ } else {
+ env->regs[r1] = res;
+ env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
+ return 2;
+ }
+}
+
/* square root 64-bit RR */
void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
{
    /* f1 = sqrt(f2); rounding/exceptions go through the softfloat status */
    env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
}
+
/* Fold a carry out of bit 32 back into the low word (end-around carry),
   as one step of the CKSM accumulation. */
static inline uint64_t cksm_overflow(uint64_t cksm)
{
    return (cksm > 0xffffffffULL) ? (cksm & 0xffffffffULL) + 1 : cksm;
}
+
/* checksum */
/* CKSM: sum the 32-bit words of the buffer at regs[r2] (byte length in
   regs[r2+1]) with end-around carry; the final 32-bit checksum goes to
   the low half of regs[r1] and the length register is cleared. */
void HELPER(cksm)(uint32_t r1, uint32_t r2)
{
    uint64_t src = get_address_31fix(r2);
    uint64_t src_len = env->regs[(r2 + 1) & 15];
    uint64_t cksm = 0;

    while (src_len >= 4) {
        cksm += ldl(src);
        cksm = cksm_overflow(cksm);

        /* move to next word */
        src_len -= 4;
        src += 4;
    }

    /* a 1-3 byte tail is summed as if zero-padded on the right */
    switch (src_len) {
    case 0:
        break;
    case 1:
        cksm += ldub(src);
        cksm = cksm_overflow(cksm);
        break;
    case 2:
        cksm += lduw(src);
        cksm = cksm_overflow(cksm);
        break;
    case 3:
        /* XXX check if this really is correct */
        cksm += lduw(src) << 8;
        cksm += ldub(src + 2);
        cksm = cksm_overflow(cksm);
        break;
    }

    /* indicate we've processed everything */
    env->regs[(r2 + 1) & 15] = 0;

    /* store result */
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (uint32_t)cksm;
}
+
+static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
+ int32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
/* load-and-test style compare against zero (32-bit signed) */
static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
{
    return cc_calc_ltgt_32(env, dst, 0);
}
+
+static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
+ int64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
/* load-and-test style compare against zero (64-bit signed) */
static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
{
    return cc_calc_ltgt_64(env, dst, 0);
}
+
+static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
+ uint32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
+ uint64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
/* cc for test-under-mask: 0 = no selected bit set (or empty mask),
   3 = all selected bits set, 1 = mixed */
static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
{
    HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
    /* NOTE(review): r is only 16 bits wide, so mask bits above bit 15
       are dropped before the r == mask comparison -- fine for 16-bit
       TM masks, but verify no caller passes a wider one. */
    uint16_t r = val & mask;
    if (r == 0 || mask == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        return 1;
    }
}
+
/* set condition code for test under mask */
/* 0 = no selected bit set (or empty mask), 3 = all set; otherwise the
   leftmost selected bit decides between cc 2 (one) and cc 1 (zero). */
static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
{
    /* NOTE(review): r is 16 bits and the scan below only looks at bit
       15 of the mask, so this assumes a 16-bit mask -- confirm callers
       never pass a wider one. */
    uint16_t r = val & mask;
    HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
    if (r == 0 || mask == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        /* shift left until the mask's leftmost selected bit reaches
           bit 15, then test the corresponding value bit */
        while (!(mask & 0x8000)) {
            mask <<= 1;
            val <<= 1;
        }
        if (val & 0x8000) {
            return 2;
        } else {
            return 1;
        }
    }
}
+
/* cc 1 if the result is non-zero, 0 otherwise */
static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
{
    return !!dst;
}
+
+static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
+ int64_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
+ uint64_t ar)
+{
+ if (ar == 0) {
+ if (a1) {
+ return 2;
+ } else {
+ return 0;
+ }
+ } else {
+ if (ar < a1 || ar < a2) {
+ return 3;
+ } else {
+ return 1;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
+ int64_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
+ uint64_t ar)
+{
+ if (ar == 0) {
+ return 2;
+ } else {
+ if (a2 > a1) {
+ return 1;
+ } else {
+ return 3;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
/* cc for 64-bit load negative: 1 nonzero, 0 zero (cannot overflow) */
static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
{
    return !!dst;
}
+
+static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+
+static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
+ int32_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
+ uint32_t ar)
+{
+ if (ar == 0) {
+ if (a1) {
+ return 2;
+ } else {
+ return 0;
+ }
+ } else {
+ if (ar < a1 || ar < a2) {
+ return 3;
+ } else {
+ return 1;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
+ int32_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
+ uint32_t ar)
+{
+ if (ar == 0) {
+ return 2;
+ } else {
+ if (a2 > a1) {
+ return 1;
+ } else {
+ return 3;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
/* cc for 32-bit load negative: 1 nonzero, 0 zero (cannot overflow) */
static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
{
    return !!dst;
}
+
+static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+/* calculate condition code for insert character under mask insn */
+static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
+{
+ HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
+ uint32_t cc;
+ if (!val || !mask) {
+ cc = 0;
+ } else {
+ while (mask != 1) {
+ mask >>= 1;
+ val >>= 8;
+ }
+ if (val & 0x80) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+ }
+ return cc;
+}
+
+static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
+ uint64_t dst, uint64_t vr)
+{
+ uint32_t r = 0;
+
+ switch (cc_op) {
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ /* cc_op value _is_ cc */
+ r = cc_op;
+ break;
+ case CC_OP_LTGT0_32:
+ r = cc_calc_ltgt0_32(env, dst);
+ break;
+ case CC_OP_LTGT0_64:
+ r = cc_calc_ltgt0_64(env, dst);
+ break;
+ case CC_OP_LTGT_32:
+ r = cc_calc_ltgt_32(env, src, dst);
+ break;
+ case CC_OP_LTGT_64:
+ r = cc_calc_ltgt_64(env, src, dst);
+ break;
+ case CC_OP_LTUGTU_32:
+ r = cc_calc_ltugtu_32(env, src, dst);
+ break;
+ case CC_OP_LTUGTU_64:
+ r = cc_calc_ltugtu_64(env, src, dst);
+ break;
+ case CC_OP_TM_32:
+ r = cc_calc_tm_32(env, src, dst);
+ break;
+ case CC_OP_TM_64:
+ r = cc_calc_tm_64(env, src, dst);
+ break;
+ case CC_OP_NZ:
+ r = cc_calc_nz(env, dst);
+ break;
+ case CC_OP_ADD_64:
+ r = cc_calc_add_64(env, src, dst, vr);
+ break;
+ case CC_OP_ADDU_64:
+ r = cc_calc_addu_64(env, src, dst, vr);
+ break;
+ case CC_OP_SUB_64:
+ r = cc_calc_sub_64(env, src, dst, vr);
+ break;
+ case CC_OP_SUBU_64:
+ r = cc_calc_subu_64(env, src, dst, vr);
+ break;
+ case CC_OP_ABS_64:
+ r = cc_calc_abs_64(env, dst);
+ break;
+ case CC_OP_NABS_64:
+ r = cc_calc_nabs_64(env, dst);
+ break;
+ case CC_OP_COMP_64:
+ r = cc_calc_comp_64(env, dst);
+ break;
+
+ case CC_OP_ADD_32:
+ r = cc_calc_add_32(env, src, dst, vr);
+ break;
+ case CC_OP_ADDU_32:
+ r = cc_calc_addu_32(env, src, dst, vr);
+ break;
+ case CC_OP_SUB_32:
+ r = cc_calc_sub_32(env, src, dst, vr);
+ break;
+ case CC_OP_SUBU_32:
+ r = cc_calc_subu_32(env, src, dst, vr);
+ break;
+ case CC_OP_ABS_32:
+ r = cc_calc_abs_64(env, dst);
+ break;
+ case CC_OP_NABS_32:
+ r = cc_calc_nabs_64(env, dst);
+ break;
+ case CC_OP_COMP_32:
+ r = cc_calc_comp_32(env, dst);
+ break;
+
+ case CC_OP_ICM:
+ r = cc_calc_icm_32(env, src, dst);
+ break;
+
+ case CC_OP_LTGT_F32:
+ r = set_cc_f32(src, dst);
+ break;
+ case CC_OP_LTGT_F64:
+ r = set_cc_f64(src, dst);
+ break;
+ case CC_OP_NZ_F32:
+ r = set_cc_nz_f32(dst);
+ break;
+ case CC_OP_NZ_F64:
+ r = set_cc_nz_f64(dst);
+ break;
+
+ default:
+ cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
+ }
+
+ HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
+ cc_name(cc_op), src, dst, vr, r);
+ return r;
+}
+
/* external (non-TCG-helper) entry point for condition-code computation */
uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
+
/* TCG helper entry point: same computation using the global env */
uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
                         uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
+
+uint64_t HELPER(cvd)(int32_t bin)
+{
+ /* positive 0 */
+ uint64_t dec = 0x0c;
+ int shift = 4;
+
+ if (bin < 0) {
+ bin = -bin;
+ dec = 0x0d;
+ }
+
+ for (shift = 4; (shift < 64) && bin; shift += 4) {
+ int current_number = bin % 10;
+
+ dec |= (current_number) << shift;
+ bin /= 10;
+ }
+
+ return dec;
+}
+
/* UNPK: unpack the packed-decimal field at src into a zoned-decimal
   field at dest, right to left.  'len' carries both operand lengths
   minus one: dest length in the high nibble, src length in the low
   nibble. */
void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = ldub(src);
    stb(dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        /* exhausted source digits unpack as zero */
        if (len_src > 0) {
            cur_byte = ldub(src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        stb(dest, cur_byte);
    }
}
+
+void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
+{
+ int i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = ldub(array + i);
+ uint8_t new_byte = ldub(trans + byte);
+ stb(array + i, new_byte);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+
/* Install a new PSW (mask + address) via the non-helper load_psw() and
   leave the translated code; cpu_loop_exit() does not return. */
void HELPER(load_psw)(uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit();
}
+
/* Deliver a program interruption with the given code and instruction
   length code.  Under KVM it is injected into the kernel; under TCG it
   is recorded in the CPU state and we long-jump out of the translated
   code (cpu_loop_exit does not return). */
static void program_interrupt(CPUState *env, uint32_t code, int ilc)
{
    qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);

    if (kvm_enabled()) {
        kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
    } else {
        env->int_pgm_code = code;
        env->int_pgm_ilc = ilc;
        env->exception_index = EXCP_PGM;
        cpu_loop_exit();
    }
}
+
/* queue an external interrupt of the given type for later delivery */
static void ext_interrupt(CPUState *env, int type, uint32_t param,
                          uint64_t param64)
{
    cpu_inject_ext(env, type, param, param64);
}
+
/* Handle a service call to the SCLP: validate the SCCB address, answer
   the read-SCP-info commands with memory size data, and post the
   service-signal external interrupt.  Returns 0 on success, -1 for an
   invalid SCCB or an unsupported command code. */
int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
{
    int r = 0;

#ifdef DEBUG_HELPER
    printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
#endif

    /* the SCCB must be below 2 GB and 8-byte aligned */
    if (sccb & ~0x7ffffff8ul) {
        fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
        r = -1;
        goto out;
    }

    switch(code) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        /* report memory size in 1 MB increments */
        stw_phys(sccb + SCP_MEM_CODE, ram_size >> 20);
        stb_phys(sccb + SCP_INCREMENT, 1);
        stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);

        if (kvm_enabled()) {
#ifdef CONFIG_KVM
            kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
                                        sccb & ~3, 0, 1);
#endif
        } else {
            env->psw.addr += 4;
            ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
        }
        break;
    default:
#ifdef DEBUG_HELPER
        printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
#endif
        r = -1;
        break;
    }

out:
    return r;
}
+
+/* SCLP service call */
+uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
+{
+ if (sclp_service_call(env, r1, r2)) {
+ return 3;
+ }
+
+ return 0;
+}
+
+/* DIAG */
+uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
+{
+ uint64_t r;
+
+ switch (num) {
+ case 0x500:
+ /* KVM hypercall */
+ r = s390_virtio_hypercall(env, mem, code);
+ break;
+ case 0x44:
+ /* yield */
+ r = 0;
+ break;
+ case 0x308:
+ /* ipl */
+ r = 0;
+ break;
+ default:
+ r = -1;
+ break;
+ }
+
+ if (r) {
+ program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
+ }
+
+ return r;
+}
+
/* Store CPU ID */
void HELPER(stidp)(uint64_t a1)
{
    /* NOTE(review): stores only cpu_num; the machine-type/version
       fields of the CPU id are left zero -- confirm that is intended */
    stq(a1, env->cpu_num);
}
+
/* Set Prefix */
void HELPER(spx)(uint64_t a1)
{
    uint32_t prefix;

    prefix = ldl(a1);
    /* prefix is 4k-aligned: drop the low 12 bits */
    env->psa = prefix & 0xfffff000;
    qemu_log("prefix: %#x\n", prefix);
    /* the first two pages are remapped by prefixing, so drop any
       cached translations for them */
    tlb_flush_page(env, 0);
    tlb_flush_page(env, TARGET_PAGE_SIZE);
}
+
/* Set Clock */
uint32_t HELPER(sck)(uint64_t a1)
{
    /* XXX not implemented - is it necessary? */

    /* always report cc 0 (clock set) despite doing nothing */
    return 0;
}
+
+static inline uint64_t clock_value(CPUState *env)
+{
+ uint64_t time;
+
+ time = env->tod_offset +
+ time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
+
+ return time;
+}
+
/* Store Clock */
uint32_t HELPER(stck)(uint64_t a1)
{
    stq(a1, clock_value(env));

    /* cc 0: clock in set state */
    return 0;
}
+
/* Store Clock Extended */
/* NOTE(review): stores at offsets 0, 1, 9 and 17 -- double-check the
   extended-clock field layout and total size against the architecture
   definition. */
uint32_t HELPER(stcke)(uint64_t a1)
{
    stb(a1, 0);
    /* basically the same value as stck */
    stq(a1 + 1, clock_value(env) | env->cpu_num);
    /* more fine grained than stck */
    stq(a1 + 9, 0);
    /* XXX programmable fields */
    stw(a1 + 17, 0);


    return 0;
}
+
/* Set Clock Comparator */
void HELPER(sckc)(uint64_t a1)
{
    uint64_t time = ldq(a1);

    /* all ones disables the comparator */
    if (time == -1ULL) {
        return;
    }

    /* difference between now and then */
    time -= clock_value(env);
    /* nanoseconds: x * 125 >> 9 == x * 1000 / 4096, TOD units -> ns */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
}
+
/* Store Clock Comparator */
void HELPER(stckc)(uint64_t a1)
{
    /* XXX implement */
    /* always reports a comparator value of zero */
    stq(a1, 0);
}
+
/* Set CPU Timer */
void HELPER(spt)(uint64_t a1)
{
    uint64_t time = ldq(a1);

    /* all ones: leave the timer unarmed */
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds: x * 125 >> 9 == x * 1000 / 4096, TOD units -> ns */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
}
+
/* Store CPU Timer */
void HELPER(stpt)(uint64_t a1)
{
    /* XXX implement */
    /* always reports a remaining time of zero */
    stq(a1, 0);
}
+
/* Store System Information */
/* Fill the SYSIB block at a0 with machine / LPAR / VM data selected by
   the function code in r0 and the selectors in r0/r1.  Returns cc 0 on
   success and cc 3 for unsupported function/selector combinations. */
uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
{
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 2);
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID */
            ebcdic_put(sysib.type, "QEMU", 4);
            /* same as model number in STORE CPU ID */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        {
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            struct sysib_221 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            stw_p(&sysib.cpu_id, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            struct sysib_222 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stw_p(&sysib.lpar_num, 0);
            sysib.lcpuc = 0;
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.conf_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            ebcdic_put(sysib.name, "QEMU    ", 8);
            stl_p(&sysib.caf, 1000);
            stw_p(&sysib.dedicated_cpus, 0);
            stw_p(&sysib.shared_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
        }
    case STSI_LEVEL_3:
        {
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            struct sysib_322 sysib;

            memset(&sysib, 0, sizeof(sysib));
            sysib.count = 1;
            /* XXX change when SMP comes */
            stw_p(&sysib.vm[0].total_cpus, 1);
            stw_p(&sysib.vm[0].conf_cpus, 1);
            stw_p(&sysib.vm[0].standby_cpus, 0);
            stw_p(&sysib.vm[0].reserved_cpus, 0);
            ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
            stl_p(&sysib.vm[0].caf, 1000);
            ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
        }
    case STSI_LEVEL_CURRENT:
        /* report the current (highest implemented) level in R0 */
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}
+
+/* LCTLG: load control registers c[r1]..c[r3] with 64-bit values from
+   memory at a2.  Control registers steer address translation, so the
+   TLB is flushed afterwards. */
+void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+    uint64_t src = a2;
+
+    /* the register range wraps mod 16, e.g. r1=14, r3=1 -> c14,c15,c0,c1 */
+    for (i = r1;; i = (i + 1) % 16) {
+        env->cregs[i] = ldq(src);
+        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
+                   i, src, env->cregs[i]);
+        src += sizeof(uint64_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+
+    tlb_flush(env, 1);
+}
+
+/* LCTL: 32-bit form of LCTLG.  Only bits 32-63 of each control
+   register are replaced; the high halves are preserved. */
+void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+    uint64_t src = a2;
+
+    /* register range wraps mod 16, as in lctlg */
+    for (i = r1;; i = (i + 1) % 16) {
+        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
+        src += sizeof(uint32_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+
+    tlb_flush(env, 1);
+}
+
+/* STCTG: store control registers c[r1]..c[r3] (wrapping mod 16) as
+   64-bit values to memory at a2. */
+void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+    uint64_t dest = a2;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        stq(dest, env->cregs[i]);
+        dest += sizeof(uint64_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+/* STCTL: 32-bit form of STCTG; stores only the low 32 bits of each
+   control register. */
+void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+    uint64_t dest = a2;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        stl(dest, env->cregs[i]);
+        dest += sizeof(uint32_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+/* TPROT: test protection.  Not implemented yet; unconditionally
+   returns cc 0, i.e. "fetch and store both permitted". */
+uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
+{
+    /* XXX implement */
+
+    return 0;
+}
+
+/* insert storage key extended
+ *
+ * Returns the storage key for the page containing the real address in
+ * r2, or 0 for addresses outside of guest RAM.
+ */
+uint64_t HELPER(iske)(uint64_t r2)
+{
+    uint64_t addr = get_address(0, 0, r2);
+
+    /* >= : addr == ram_size is already one byte past the end of RAM,
+       and addr / TARGET_PAGE_SIZE would index one past the end of the
+       storage_keys array */
+    if (addr >= ram_size) {
+        return 0;
+    }
+
+    /* XXX maybe use qemu's internal keys? */
+    return env->storage_keys[addr / TARGET_PAGE_SIZE];
+}
+
+/* set storage key extended
+ *
+ * Sets the storage key of the page containing the real address in r2
+ * from the low bits of r1.  Out-of-RAM addresses are silently ignored.
+ */
+void HELPER(sske)(uint32_t r1, uint64_t r2)
+{
+    uint64_t addr = get_address(0, 0, r2);
+
+    /* >= : addr == ram_size would write one past the end of the
+       storage_keys array (same off-by-one as in iske) */
+    if (addr >= ram_size) {
+        return;
+    }
+
+    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
+}
+
+/* reset reference bit extended
+ *
+ * Should clear the reference bit of the addressed page's storage key
+ * and return the old reference/change state in the cc; the key update
+ * itself is still unimplemented.
+ */
+uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
+{
+    /* >= : r2 == ram_size is already past the end of RAM.  NOTE(review):
+       unlike iske/sske this checks the raw register value instead of
+       going through get_address() first - confirm which is intended. */
+    if (r2 >= ram_size) {
+        return 0;
+    }
+
+    /* XXX implement */
+#if 0
+    env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED;
+#endif
+
+    /*
+     * cc
+     *
+     * 0  Reference bit zero; change bit zero
+     * 1  Reference bit zero; change bit one
+     * 2  Reference bit one; change bit zero
+     * 3  Reference bit one; change bit one
+     */
+    return 0;
+}
+
+/* compare and swap and purge
+ *
+ * Compares the low 32 bits of r1 with the word at the address in r2
+ * (forced word-aligned).  On match, stores the low word of r1+1 there
+ * and, if the low bits of r2 request it, purges the TLB/ALB; cc 0.
+ * On mismatch, loads the current word into the low half of r1; cc 1.
+ */
+uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
+{
+    uint32_t cc;
+    uint32_t o1 = env->regs[r1];
+    /* operand address is word-aligned; low two bits are control bits */
+    uint64_t a2 = get_address_31fix(r2) & ~3ULL;
+    uint32_t o2 = ldl(a2);
+
+    if (o1 == o2) {
+        stl(a2, env->regs[(r1 + 1) & 15]);
+        if (env->regs[r2] & 0x3) {
+            /* flush TLB / ALB */
+            tlb_flush(env, 1);
+        }
+        cc = 0;
+    } else {
+        /* mismatch: return the current second operand in r1 low half */
+        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
+        cc = 1;
+    }
+
+    return cc;
+}
+
+/* Copy l bytes (capped at 256, cc 3 if longer) from a2 in address
+ * space mode2 to a1 in mode1, translating both addresses up front and
+ * copying byte-wise through physical memory.  When the copy crosses a
+ * page boundary on either side it recurses to re-translate and finish
+ * the remainder.  Backs MVCP/MVCS below.
+ */
+static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
+                        uint64_t mode2)
+{
+    target_ulong src, dest;
+    int flags, cc = 0, i;
+
+    if (!l) {
+        return 0;
+    } else if (l > 256) {
+        /* max 256 */
+        l = 256;
+        cc = 3;
+    }
+
+    /* translation failure raises the pending exception via cpu_loop_exit */
+    if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
+        cpu_loop_exit();
+    }
+    dest |= a1 & ~TARGET_PAGE_MASK;
+
+    if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
+        cpu_loop_exit();
+    }
+    src |= a2 & ~TARGET_PAGE_MASK;
+
+    /* XXX replace w/ memcpy */
+    for (i = 0; i < l; i++) {
+        /* XXX be more clever */
+        if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
+            (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
+            /* crossed a page boundary: re-translate and copy the rest */
+            mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
+            break;
+        }
+        stb_phys(dest + i, ldub_phys(src + i));
+    }
+
+    return cc;
+}
+
+/* MVCS: move to secondary - copy from the primary address space into
+   the secondary address space. */
+uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
+{
+    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+               __FUNCTION__, l, a1, a2);
+
+    return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
+}
+
+/* MVCP: move to primary - copy from the secondary address space into
+   the primary address space. */
+uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
+{
+    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+               __FUNCTION__, l, a1, a2);
+
+    return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
+}
+
+/* SIGP: signal processor.  Only SET_ARCH (no-op) and SENSE of the
+   local CPU are handled; everything else returns cc 3 (not
+   operational). */
+uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
+{
+    int cc = 0;
+
+    HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
+               __FUNCTION__, order_code, r1, cpu_addr);
+
+    /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
+       as parameter (input).  Status (output) is always R1. */
+
+    switch (order_code) {
+    case SIGP_SET_ARCH:
+        /* switch arch */
+        break;
+    case SIGP_SENSE:
+        /* enumerate CPU status */
+        if (cpu_addr) {
+            /* XXX implement when SMP comes */
+            return 3;
+        }
+        /* status 0 in the low half of r1; cc 1 = status stored */
+        env->regs[r1] &= 0xffffffff00000000ULL;
+        cc = 1;
+        break;
+    default:
+        /* unknown sigp */
+        fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
+        cc = 3;
+    }
+
+    return cc;
+}
+
+/* SACF: set address space control fast - switch the PSW's address
+   space control (ASC) mode according to bits 52-55 of the operand
+   address.  Unsupported modes raise a specification exception. */
+void HELPER(sacf)(uint64_t a1)
+{
+    HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
+
+    switch (a1 & 0xf00) {
+    case 0x000:
+        env->psw.mask &= ~PSW_MASK_ASC;
+        env->psw.mask |= PSW_ASC_PRIMARY;
+        break;
+    case 0x100:
+        env->psw.mask &= ~PSW_MASK_ASC;
+        env->psw.mask |= PSW_ASC_SECONDARY;
+        break;
+    case 0x300:
+        env->psw.mask &= ~PSW_MASK_ASC;
+        env->psw.mask |= PSW_ASC_HOME;
+        break;
+    default:
+        /* 0x200 (access-register mode) and others are not supported */
+        qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        break;
+    }
+}
+
+/* invalidate pte
+ *
+ * Marks the page table entry at pte_addr invalid and drops the
+ * matching TLB entry.  Relies on Linux guest behavior in two places
+ * (see XXX notes): the exact pte address and the exact virtual address
+ * are taken at face value rather than derived per the architecture.
+ */
+void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
+{
+    uint64_t page = vaddr & TARGET_PAGE_MASK;
+    uint64_t pte = 0;
+
+    /* XXX broadcast to other CPUs */
+
+    /* XXX Linux is nice enough to give us the exact pte address.
+       According to spec we'd have to find it out ourselves */
+    /* XXX Linux is fine with overwriting the pte, the spec requires
+       us to only set the invalid bit */
+    stq_phys(pte_addr, pte | _PAGE_INVALID);
+
+    /* XXX we exploit the fact that Linux passes the exact virtual
+       address here - it's not obliged to! */
+    tlb_flush_page(env, page);
+}
+
+/* flush local tlb (PTLB: purge TLB of this CPU) */
+void HELPER(ptlb)(void)
+{
+    tlb_flush(env, 1);
+}
+
+/* store using real address
+ *
+ * STURA stores the low-order 32 bits of r1 at the real address
+ * designated by r2.  The operand is a 4-byte word, so stl_phys is
+ * required here; stw_phys would store only the low 16 bits.
+ */
+void HELPER(stura)(uint64_t addr, uint32_t v1)
+{
+    stl_phys(get_address(0, 0, addr), v1);
+}
+
+/* load real address
+ *
+ * LRA: translate the virtual address in addr and put the real address
+ * (or, on a translation exception, the pgm code with bit 32 set) into
+ * r1.  cc 0 on success, cc 3 on translation failure.  In 31-bit mode
+ * only the low half of r1 is replaced.
+ */
+uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
+{
+    uint32_t cc = 0;
+    int old_exc = env->exception_index;
+    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+    uint64_t ret;
+    int flags;
+
+    /* XXX incomplete - has more corner cases */
+    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+        program_interrupt(env, PGM_SPECIAL_OP, 2);
+    }
+
+    /* mmu_translate may set exception_index; restore it afterwards so
+       the fault is reported via cc/r1 instead of being delivered */
+    env->exception_index = old_exc;
+    if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
+        cc = 3;
+    }
+    if (env->exception_index == EXCP_PGM) {
+        /* bit 32 set marks r1 as holding a program interruption code */
+        ret = env->int_pgm_code | 0x80000000;
+    } else {
+        ret |= addr & ~TARGET_PAGE_MASK;
+    }
+    env->exception_index = old_exc;
+
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
+    } else {
+        env->regs[r1] = ret;
+    }
+
+    return cc;
+}
+
+#endif
--
1.6.0.2
^ permalink raw reply related [flat|nested] 29+ messages in thread
* [Qemu-devel] [PATCH 18/19] s390x: translate engine for s390x CPU
2011-03-29 13:29 [Qemu-devel] [PATCH 00/19] s390x emulation support Alexander Graf
` (16 preceding siblings ...)
2011-03-29 13:29 ` [Qemu-devel] [PATCH 17/19] s390x: Adjust internal kvm code Alexander Graf
@ 2011-03-29 13:29 ` Alexander Graf
2011-03-29 13:29 ` [Qemu-devel] [PATCH 19/19] s390x: build s390x by default Alexander Graf
` (2 subsequent siblings)
20 siblings, 0 replies; 29+ messages in thread
From: Alexander Graf @ 2011-03-29 13:29 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, aurelien, rth
This is the main meat part of the patch set. It implements emulation for an
s390x CPU.
The code does all the optimizations that are common for TCG code:
- direct branches
- cc optimization
- unrolling of simple microcode loops
I'm still open for suggestions on speedups of course :).
Signed-off-by: Alexander Graf <agraf@suse.de>
---
v1 -> v2:
- don't pass env into DisasContext
- remove if 0'd code
- truncate at 80 chars
- enable disas debug by default (-d in_asm)
- remove explicit psw.addr advancing on SVC
---
target-s390x/translate.c | 5116 +++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 5113 insertions(+), 3 deletions(-)
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index d33bfb1..6756b84 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -2,6 +2,7 @@
* S/390 translation
*
* Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2010 Alexander Graf
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -16,6 +17,22 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+/* #define DEBUG_ILLEGAL_INSTRUCTIONS */
+/* #define DEBUG_INLINE_BRANCHES */
+#define S390X_DEBUG_DISAS
+/* #define S390X_DEBUG_DISAS_VERBOSE */
+
+#ifdef S390X_DEBUG_DISAS_VERBOSE
+# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
#include "cpu.h"
#include "exec-all.h"
@@ -23,39 +40,5132 @@
#include "tcg-op.h"
#include "qemu-log.h"
+/* global register indexes */
+static TCGv_ptr cpu_env;
+
+#include "gen-icount.h"
+#include "helpers.h"
+#define GEN_HELPER 1
+#include "helpers.h"
+
+/* Per-translation-block state carried through the decoder. */
+typedef struct DisasContext DisasContext;
+struct DisasContext {
+    /* address of the next instruction to decode */
+    uint64_t pc;
+    /* DISAS_* disposition; ends the translation loop when set */
+    int is_jmp;
+    /* how the condition code of the current insn can be computed */
+    enum cc_op cc_op;
+    struct TranslationBlock *tb;
+};
+
+/* is_jmp value: an exception was generated, stop translating */
+#define DISAS_EXCP 4
+
+static void gen_op_calc_cc(DisasContext *s);
+
+#ifdef DEBUG_INLINE_BRANCHES
+/* per-cc_op counters of branches we could/could not inline */
+static uint64_t inline_branch_hit[CC_OP_MAX];
+static uint64_t inline_branch_miss[CC_OP_MAX];
+#endif
+
+static inline void debug_insn(uint64_t insn)
+{
+    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
+}
+
+/* Link info (as stored by BAL/BAS etc.): in 31-bit mode the high bit
+   of the address carries the addressing mode. */
+static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
+{
+    if (!(s->tb->flags & FLAG_MASK_64)) {
+        if (s->tb->flags & FLAG_MASK_32) {
+            return pc | 0x80000000;
+        }
+    }
+    return pc;
+}
+
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
int i;
+
for (i = 0; i < 16; i++) {
- cpu_fprintf(f, "R%02d=%016lx", i, env->regs[i]);
+        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
if ((i % 4) == 3) {
cpu_fprintf(f, "\n");
} else {
cpu_fprintf(f, " ");
}
}
+
for (i = 0; i < 16; i++) {
- cpu_fprintf(f, "F%02d=%016lx", i, (long)env->fregs[i].i);
+        cpu_fprintf(f, "F%02d=%016" PRIx64, i, *(uint64_t *)&env->fregs[i]);
if ((i % 4) == 3) {
cpu_fprintf(f, "\n");
} else {
cpu_fprintf(f, " ");
}
}
- cpu_fprintf(f, "PSW=mask %016lx addr %016lx cc %02x\n", env->psw.mask, env->psw.addr, env->cc);
+
+    cpu_fprintf(f, "\n");
+
+#ifndef CONFIG_USER_ONLY
+    /* control registers only exist with system emulation */
+    for (i = 0; i < 16; i++) {
+        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
+        if ((i % 4) == 3) {
+            cpu_fprintf(f, "\n");
+        } else {
+            cpu_fprintf(f, " ");
+        }
+    }
+#endif
+
+    cpu_fprintf(f, "\n");
+
+    /* cc_op values above 3 are symbolic "how to compute cc" ops */
+    if (env->cc_op > 3) {
+        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
+                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
+    } else {
+        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
+                    env->psw.mask, env->psw.addr, env->cc_op);
+    }
+
+#ifdef DEBUG_INLINE_BRANCHES
+    /* NOTE(review): %10ld with uint64_t counters is a format mismatch
+       on 32-bit hosts - PRId64 would be correct here */
+    for (i = 0; i < CC_OP_MAX; i++) {
+        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
+                    inline_branch_miss[i], inline_branch_hit[i]);
+    }
+#endif
+}
+
+static TCGv_i64 psw_addr;
+static TCGv_i64 psw_mask;
+
+static TCGv_i32 cc_op;
+static TCGv_i64 cc_src;
+static TCGv_i64 cc_dst;
+static TCGv_i64 cc_vr;
+
+static char cpu_reg_names[10*3 + 6*4];
+static TCGv_i64 regs[16];
+
+static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
+
+/* Register the TCG globals (PSW, cc state, GPRs) that the translator
+   maps onto CPUState fields.  Called once at startup. */
+void s390x_translate_init(void)
+{
+    int i;
+    /* 10*3 + 6*4: "r0".."r9" need 3 bytes each, "r10".."r15" need 4 */
+    size_t cpu_reg_names_size = sizeof(cpu_reg_names);
+    char *p;
+
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+    psw_addr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, psw.addr),
+                                      "psw_addr");
+    psw_mask = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, psw.mask),
+                                      "psw_mask");
+
+    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, cc_op),
+                                   "cc_op");
+    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_src),
+                                    "cc_src");
+    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_dst),
+                                    "cc_dst");
+    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_vr),
+                                   "cc_vr");
+
+    /* the register names must live as long as the globals, hence the
+       static cpu_reg_names buffer carved up here */
+    p = cpu_reg_names;
+    for (i = 0; i < 16; i++) {
+        snprintf(p, cpu_reg_names_size, "r%d", i);
+        regs[i] = tcg_global_mem_new(TCG_AREG0,
+                                     offsetof(CPUState, regs[i]), p);
+        p += (i < 10) ? 3 : 4;
+        cpu_reg_names_size -= (i < 10) ? 3 : 4;
+    }
+}
+
+/* Accessors for general purpose and floating point registers.  The
+ * load_* helpers return fresh temporaries (caller frees); the store_*
+ * helpers for sub-64-bit widths deposit into the low bits and preserve
+ * the remaining upper bits of the 64-bit register, as the architecture
+ * requires.  fregs[].l.upper is the 32-bit (short) float view. */
+static inline TCGv_i64 load_reg(int reg)
+{
+    TCGv_i64 r = tcg_temp_new_i64();
+    tcg_gen_mov_i64(r, regs[reg]);
+    return r;
+}
+
+static inline TCGv_i64 load_freg(int reg)
+{
+    TCGv_i64 r = tcg_temp_new_i64();
+    tcg_gen_ld_i64(r, cpu_env, offsetof(CPUState, fregs[reg].d));
+    return r;
+}
+
+static inline TCGv_i32 load_freg32(int reg)
+{
+    TCGv_i32 r = tcg_temp_new_i32();
+    tcg_gen_ld_i32(r, cpu_env, offsetof(CPUState, fregs[reg].l.upper));
+    return r;
+}
+
+static inline TCGv_i32 load_reg32(int reg)
+{
+    TCGv_i32 r = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(r, regs[reg]);
+    return r;
+}
+
+/* low 32 bits of the register, sign-extended to 64 bits */
+static inline TCGv_i64 load_reg32_i64(int reg)
+{
+    TCGv_i64 r = tcg_temp_new_i64();
+    tcg_gen_ext32s_i64(r, regs[reg]);
+    return r;
+}
+
+static inline void store_reg(int reg, TCGv_i64 v)
+{
+    tcg_gen_mov_i64(regs[reg], v);
+}
+
+static inline void store_freg(int reg, TCGv_i64 v)
+{
+    tcg_gen_st_i64(v, cpu_env, offsetof(CPUState, fregs[reg].d));
+}
+
+static inline void store_reg32(int reg, TCGv_i32 v)
+{
+    TCGv_i64 tmp = tcg_temp_new_i64();
+    tcg_gen_extu_i32_i64(tmp, v);
+    /* 32 bit register writes keep the upper half */
+    tcg_gen_deposit_i64(regs[reg], regs[reg], tmp, 0, 32);
+    tcg_temp_free_i64(tmp);
+}
+
+static inline void store_reg32_i64(int reg, TCGv_i64 v)
+{
+    /* 32 bit register writes keep the upper half */
+    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
+}
+
+static inline void store_reg16(int reg, TCGv_i32 v)
+{
+    TCGv_i64 tmp = tcg_temp_new_i64();
+    tcg_gen_extu_i32_i64(tmp, v);
+    /* 16 bit register writes keep the upper bytes */
+    tcg_gen_deposit_i64(regs[reg], regs[reg], tmp, 0, 16);
+    tcg_temp_free_i64(tmp);
+}
+
+static inline void store_reg8(int reg, TCGv_i64 v)
+{
+    /* 8 bit register writes keep the upper bytes */
+    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 8);
+}
+
+static inline void store_freg32(int reg, TCGv_i32 v)
+{
+    tcg_gen_st_i32(v, cpu_env, offsetof(CPUState, fregs[reg].l.upper));
+}
+
+/* sync the guest-visible PSW address with the translator's pc */
+static inline void update_psw_addr(DisasContext *s)
+{
+    /* psw.addr */
+    tcg_gen_movi_i64(psw_addr, s->pc);
+}
+
+/* Before any operation that may fault in system mode, make the PSW
+   address and cc consistent so the fault handler sees correct state.
+   User mode accesses cannot page-fault, so this is a no-op there. */
+static inline void potential_page_fault(DisasContext *s)
+{
+#ifndef CONFIG_USER_ONLY
+    update_psw_addr(s);
+    gen_op_calc_cc(s);
+#endif
+}
+
+/* fetch a 2-byte instruction */
+static inline uint64_t ld_code2(uint64_t pc)
+{
+    return (uint64_t)lduw_code(pc);
+}
+
+/* fetch a 4-byte instruction */
+static inline uint64_t ld_code4(uint64_t pc)
+{
+    return (uint64_t)ldl_code(pc);
+}
+
+/* fetch a 6-byte instruction as a 48-bit value (high halfword first) */
+static inline uint64_t ld_code6(uint64_t pc)
+{
+    uint64_t opc;
+    opc = (uint64_t)lduw_code(pc) << 32;
+    opc |= (uint64_t)(uint32_t)ldl_code(pc+2);
+    return opc;
+}
+
+/* Map the PSW address-space-control bits (cached in tb->flags) to the
+   mmu index: 0 = primary, 1 = secondary, 2 = home (must match the
+   MMU_MODE*_SUFFIX ordering in cpu.h). */
+static inline int get_mem_index(DisasContext *s)
+{
+    switch (s->tb->flags & FLAG_MASK_ASC) {
+    case PSW_ASC_PRIMARY >> 32:
+        return 0;
+    case PSW_ASC_SECONDARY >> 32:
+        return 1;
+    case PSW_ASC_HOME >> 32:
+        return 2;
+    default:
+        tcg_abort();
+        break;
+    }
+}
+
+/* raise EXCP_DEBUG with PSW and cc synced; ends the TB */
+static inline void gen_debug(DisasContext *s)
+{
+    TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
+    update_psw_addr(s);
+    gen_op_calc_cc(s);
+    gen_helper_exception(tmp);
+    tcg_temp_free_i32(tmp);
+    s->is_jmp = DISAS_EXCP;
+}
+
+#ifdef CONFIG_USER_ONLY
+
+/* user mode: report an illegal opcode as EXCP_SPEC to the cpu loop */
+static void gen_illegal_opcode(DisasContext *s, int ilc)
+{
+    TCGv_i32 tmp = tcg_const_i32(EXCP_SPEC);
+    update_psw_addr(s);
+    gen_op_calc_cc(s);
+    gen_helper_exception(tmp);
+    tcg_temp_free_i32(tmp);
+    s->is_jmp = DISAS_EXCP;
+}
+
+#else /* CONFIG_USER_ONLY */
+
+/* dump the offending instruction bytes when DEBUG_ILLEGAL_INSTRUCTIONS
+   is enabled; ilc (instruction length code) selects the fetch width */
+static void debug_print_inst(DisasContext *s, int ilc)
+{
+#ifdef DEBUG_ILLEGAL_INSTRUCTIONS
+    uint64_t inst = 0;
+
+    switch (ilc & 3) {
+    case 1:
+        inst = ld_code2(s->pc);
+        break;
+    case 2:
+        inst = ld_code4(s->pc);
+        break;
+    case 3:
+        inst = ld_code6(s->pc);
+        break;
+    }
+
+    fprintf(stderr, "Illegal instruction [%d at %016" PRIx64 "]: 0x%016"
+            PRIx64 "\n", ilc, s->pc, inst);
+#endif
+}
+
+/* Generate a program exception: record code and ilc in the CPU state,
+   advance the PSW past the instruction, sync the cc, raise EXCP_PGM
+   and end the TB. */
+static void gen_program_exception(DisasContext *s, int ilc, int code)
+{
+    TCGv_i32 tmp;
+
+    debug_print_inst(s, ilc);
+
+    /* remember which program exception this was */
+    tmp = tcg_const_i32(code);
+    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, int_pgm_code));
+    tcg_temp_free_i32(tmp);
+
+    tmp = tcg_const_i32(ilc);
+    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, int_pgm_ilc));
+    tcg_temp_free_i32(tmp);
+
+    /* advance past instruction (ilc is in halfwords) */
+    s->pc += (ilc * 2);
+    update_psw_addr(s);
+
+    /* save off cc */
+    gen_op_calc_cc(s);
+
+    /* trigger exception */
+    tmp = tcg_const_i32(EXCP_PGM);
+    gen_helper_exception(tmp);
+    tcg_temp_free_i32(tmp);
+
+    /* end TB here */
+    s->is_jmp = DISAS_EXCP;
+}
+
+
+static void gen_illegal_opcode(DisasContext *s, int ilc)
+{
+    gen_program_exception(s, ilc, PGM_SPECIFICATION);
+}
+
+static void gen_privileged_exception(DisasContext *s, int ilc)
+{
+    gen_program_exception(s, ilc, PGM_PRIVILEGED);
+}
+
+/* emit a privileged-operation exception if translating in problem
+   state (PSW P bit set in the cached tb flags) */
+static void check_privileged(DisasContext *s, int ilc)
+{
+    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
+        gen_privileged_exception(s, ilc);
+    }
+}
+
+#endif /* CONFIG_USER_ONLY */
+
+/* Compute the effective address base(b2) + index(x2) + displacement d2
+   into a fresh temporary.  Register 0 means "no base/index".  In
+   31-bit mode the result is masked to 31 bits. */
+static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
+{
+    TCGv_i64 tmp;
+
+    /* 31-bitify the immediate part; register contents are dealt with below */
+    if (!(s->tb->flags & FLAG_MASK_64)) {
+        d2 &= 0x7fffffffUL;
+    }
+
+    if (x2) {
+        if (d2) {
+            tmp = tcg_const_i64(d2);
+            tcg_gen_add_i64(tmp, tmp, regs[x2]);
+        } else {
+            tmp = load_reg(x2);
+        }
+        if (b2) {
+            tcg_gen_add_i64(tmp, tmp, regs[b2]);
+        }
+    } else if (b2) {
+        if (d2) {
+            tmp = tcg_const_i64(d2);
+            tcg_gen_add_i64(tmp, tmp, regs[b2]);
+        } else {
+            tmp = load_reg(b2);
+        }
+    } else {
+        tmp = tcg_const_i64(d2);
+    }
+
+    /* 31-bit mode mask if there are values loaded from registers */
+    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
+        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
+    }
+
+    return tmp;
+}
+
+/* Lazy cc tracking: instead of computing the condition code after
+ * every instruction, record the cc_op plus up to three operand values
+ * (cc_src, cc_dst, cc_vr); gen_op_calc_cc materializes the cc only
+ * when needed.  Unused operands are discarded so TCG can drop them. */
+
+/* constant cc: encode the value directly in the cc_op */
+static void gen_op_movi_cc(DisasContext *s, uint32_t val)
+{
+    s->cc_op = CC_OP_CONST0 + val;
+}
+
+static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
+{
+    tcg_gen_discard_i64(cc_src);
+    tcg_gen_mov_i64(cc_dst, dst);
+    tcg_gen_discard_i64(cc_vr);
+    s->cc_op = op;
+}
+
+static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
+{
+    tcg_gen_discard_i64(cc_src);
+    tcg_gen_extu_i32_i64(cc_dst, dst);
+    tcg_gen_discard_i64(cc_vr);
+    s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+                                  TCGv_i64 dst)
+{
+    tcg_gen_mov_i64(cc_src, src);
+    tcg_gen_mov_i64(cc_dst, dst);
+    tcg_gen_discard_i64(cc_vr);
+    s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
+                                  TCGv_i32 dst)
+{
+    tcg_gen_extu_i32_i64(cc_src, src);
+    tcg_gen_extu_i32_i64(cc_dst, dst);
+    tcg_gen_discard_i64(cc_vr);
+    s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+                                  TCGv_i64 dst, TCGv_i64 vr)
+{
+    tcg_gen_mov_i64(cc_src, src);
+    tcg_gen_mov_i64(cc_dst, dst);
+    tcg_gen_mov_i64(cc_vr, vr);
+    s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
+                                  TCGv_i32 dst, TCGv_i32 vr)
+{
+    tcg_gen_extu_i32_i64(cc_src, src);
+    tcg_gen_extu_i32_i64(cc_dst, dst);
+    tcg_gen_extu_i32_i64(cc_vr, vr);
+    s->cc_op = op;
+}
+
+/* Shorthand wrappers that record a comparison (or zero test) as the
+ * pending cc computation.  The *c variants take an immediate second
+ * operand. */
+static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
+{
+    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
+}
+
+static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
+}
+
+static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
+                          enum cc_op cond)
+{
+    gen_op_update2_cc_i32(s, cond, v1, v2);
+}
+
+static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
+                          enum cc_op cond)
+{
+    gen_op_update2_cc_i64(s, cond, v1, v2);
+}
+
+static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
+{
+    cmp_32(s, v1, v2, CC_OP_LTGT_32);
+}
+
+static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
+{
+    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
+}
+
+static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
+{
+    /* XXX optimize for the constant? put it in s? */
+    TCGv_i32 tmp = tcg_const_i32(v2);
+    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
+    tcg_temp_free_i32(tmp);
+}
+
+static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
+{
+    TCGv_i32 tmp = tcg_const_i32(v2);
+    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
+    tcg_temp_free_i32(tmp);
+}
+
+static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
+{
+    cmp_64(s, v1, v2, CC_OP_LTGT_64);
+}
+
+static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
+{
+    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
+}
+
+static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
+{
+    TCGv_i64 tmp = tcg_const_i64(v2);
+    cmp_s64(s, v1, tmp);
+    tcg_temp_free_i64(tmp);
+}
+
+static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
+{
+    TCGv_i64 tmp = tcg_const_i64(v2);
+    cmp_u64(s, v1, tmp);
+    tcg_temp_free_i64(tmp);
+}
+
+static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
+{
+    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
+}
+
+static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
+}
+
+/* cc recorders for arithmetic results: (v1, v2, vr) = operands and
+ * result, so overflow/carry can be derived lazily later. */
+static void set_cc_add64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, TCGv_i64 vr)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADD_64, v1, v2, vr);
+}
+
+static void set_cc_addu64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
+                          TCGv_i64 vr)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, v1, v2, vr);
+}
+
+static void set_cc_sub64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, TCGv_i64 vr)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUB_64, v1, v2, vr);
+}
+
+static void set_cc_subu64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
+                          TCGv_i64 vr)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, v1, v2, vr);
+}
+
+static void set_cc_abs64(DisasContext *s, TCGv_i64 v1)
+{
+    gen_op_update1_cc_i64(s, CC_OP_ABS_64, v1);
+}
+
+static void set_cc_nabs64(DisasContext *s, TCGv_i64 v1)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NABS_64, v1);
+}
+
+static void set_cc_add32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, TCGv_i32 vr)
+{
+    gen_op_update3_cc_i32(s, CC_OP_ADD_32, v1, v2, vr);
+}
+
+static void set_cc_addu32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
+                          TCGv_i32 vr)
+{
+    gen_op_update3_cc_i32(s, CC_OP_ADDU_32, v1, v2, vr);
+}
+
+static void set_cc_sub32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, TCGv_i32 vr)
+{
+    gen_op_update3_cc_i32(s, CC_OP_SUB_32, v1, v2, vr);
+}
+
+static void set_cc_subu32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
+                          TCGv_i32 vr)
+{
+    gen_op_update3_cc_i32(s, CC_OP_SUBU_32, v1, v2, vr);
+}
+
+static void set_cc_abs32(DisasContext *s, TCGv_i32 v1)
+{
+    gen_op_update1_cc_i32(s, CC_OP_ABS_32, v1);
+}
+
+static void set_cc_nabs32(DisasContext *s, TCGv_i32 v1)
+{
+    gen_op_update1_cc_i32(s, CC_OP_NABS_32, v1);
+}
+
+static void set_cc_comp32(DisasContext *s, TCGv_i32 v1)
+{
+    gen_op_update1_cc_i32(s, CC_OP_COMP_32, v1);
+}
+
+static void set_cc_comp64(DisasContext *s, TCGv_i64 v1)
+{
+    gen_op_update1_cc_i64(s, CC_OP_COMP_64, v1);
+}
+
+/* NOTE(review): the mask parameter is unused here - CC_OP_ICM appears
+   to derive everything from (v1, v2); confirm that is intended */
+static void set_cc_icm(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, int mask)
+{
+    gen_op_update2_cc_i32(s, CC_OP_ICM, v1, v2);
+}
+
+static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
+{
+    tcg_gen_extu_i32_i64(cc_src, v1);
+    tcg_gen_mov_i64(cc_dst, v2);
+    tcg_gen_discard_i64(cc_vr);
+    s->cc_op = CC_OP_LTGT_F32;
+}
+
+static void set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
+{
+    gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
+}
+
+static inline void set_cc_nz_f64(DisasContext *s, TCGv_i64 v1)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, v1);
+}
+
+/* CC value is in env->cc_op */
+static inline void set_cc_static(DisasContext *s)
+{
+    tcg_gen_discard_i64(cc_src);
+    tcg_gen_discard_i64(cc_dst);
+    tcg_gen_discard_i64(cc_vr);
+    s->cc_op = CC_OP_STATIC;
+}
+
+/* spill the symbolic cc_op into the env-side cc_op global */
+static inline void gen_op_set_cc_op(DisasContext *s)
+{
+    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
+        tcg_gen_movi_i32(cc_op, s->cc_op);
+    }
+}
+
+static inline void gen_update_cc_op(DisasContext *s)
+{
+    gen_op_set_cc_op(s);
+}
+
+/* calculates cc into cc_op
+ *
+ * Materializes the lazily tracked condition code: depending on how
+ * many operands the pending cc_op needs, calls the calc_cc helper with
+ * cc_src/cc_dst/cc_vr (unused slots get a dummy 0).  Afterwards the cc
+ * lives in the cc_op global and the state becomes CC_OP_STATIC.
+ */
+static void gen_op_calc_cc(DisasContext *s)
+{
+    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
+    TCGv_i64 dummy = tcg_const_i64(0);
+
+    switch (s->cc_op) {
+    case CC_OP_CONST0:
+    case CC_OP_CONST1:
+    case CC_OP_CONST2:
+    case CC_OP_CONST3:
+        /* s->cc_op is the cc value */
+        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
+        break;
+    case CC_OP_STATIC:
+        /* env->cc_op already is the cc value */
+        break;
+    case CC_OP_NZ:
+    case CC_OP_ABS_64:
+    case CC_OP_NABS_64:
+    case CC_OP_ABS_32:
+    case CC_OP_NABS_32:
+    case CC_OP_LTGT0_32:
+    case CC_OP_LTGT0_64:
+    case CC_OP_COMP_32:
+    case CC_OP_COMP_64:
+    case CC_OP_NZ_F32:
+    case CC_OP_NZ_F64:
+        /* 1 argument */
+        gen_helper_calc_cc(cc_op, local_cc_op, dummy, cc_dst, dummy);
+        break;
+    case CC_OP_ICM:
+    case CC_OP_LTGT_32:
+    case CC_OP_LTGT_64:
+    case CC_OP_LTUGTU_32:
+    case CC_OP_LTUGTU_64:
+    case CC_OP_TM_32:
+    case CC_OP_TM_64:
+    case CC_OP_LTGT_F32:
+    case CC_OP_LTGT_F64:
+        /* 2 arguments */
+        gen_helper_calc_cc(cc_op, local_cc_op, cc_src, cc_dst, dummy);
+        break;
+    case CC_OP_ADD_64:
+    case CC_OP_ADDU_64:
+    case CC_OP_SUB_64:
+    case CC_OP_SUBU_64:
+    case CC_OP_ADD_32:
+    case CC_OP_ADDU_32:
+    case CC_OP_SUB_32:
+    case CC_OP_SUBU_32:
+        /* 3 arguments */
+        gen_helper_calc_cc(cc_op, local_cc_op, cc_src, cc_dst, cc_vr);
+        break;
+    case CC_OP_DYNAMIC:
+        /* unknown operation - assume 3 arguments and cc_op in env */
+        gen_helper_calc_cc(cc_op, cc_op, cc_src, cc_dst, cc_vr);
+        break;
+    default:
+        tcg_abort();
+    }
+
+    tcg_temp_free_i32(local_cc_op);
+
+    /* We now have cc in cc_op as constant */
+    set_cc_static(s);
+}
+
+/* Field decoders for the basic instruction formats.  decode_rx and
+ * decode_si additionally return the computed effective address as a
+ * fresh temporary (caller frees). */
+
+/* RR format: op(8) r1(4) r2(4) */
+static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
+{
+    debug_insn(insn);
+
+    *r1 = (insn >> 4) & 0xf;
+    *r2 = insn & 0xf;
+}
+
+/* RX format: op(8) r1(4) x2(4) b2(4) d2(12) */
+static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
+                                 int *x2, int *b2, int *d2)
+{
+    debug_insn(insn);
+
+    *r1 = (insn >> 20) & 0xf;
+    *x2 = (insn >> 16) & 0xf;
+    *b2 = (insn >> 12) & 0xf;
+    *d2 = insn & 0xfff;
+
+    return get_address(s, *x2, *b2, *d2);
+}
+
+/* RS format: op(8) r1(4) r3(4) b2(4) d2(12) */
+static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
+                             int *b2, int *d2)
+{
+    debug_insn(insn);
+
+    *r1 = (insn >> 20) & 0xf;
+    /* aka m3 */
+    *r3 = (insn >> 16) & 0xf;
+    *b2 = (insn >> 12) & 0xf;
+    *d2 = insn & 0xfff;
+}
+
+/* SI format: op(8) i2(8) b1(4) d1(12) */
+static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
+                                 int *b1, int *d1)
+{
+    debug_insn(insn);
+
+    *i2 = (insn >> 16) & 0xff;
+    *b1 = (insn >> 12) & 0xf;
+    *d1 = insn & 0xfff;
+
+    return get_address(s, 0, *b1, *d1);
+}
+
+/* Emit a jump to pc, chaining TBs directly when the target lies on the
+   same guest page as this TB (or the page of the current instruction),
+   falling back to an indirect exit otherwise.  The cc_op is spilled
+   first so the next TB sees a consistent cc state. */
+static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
+{
+    TranslationBlock *tb;
+
+    gen_update_cc_op(s);
+
+    tb = s->tb;
+    /* NOTE: we handle the case where the TB spans two pages here */
+    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
+        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
+        /* jump to same page: we can use a direct jump */
+        tcg_gen_goto_tb(tb_num);
+        tcg_gen_movi_i64(psw_addr, pc);
+        tcg_gen_exit_tb((long)tb + tb_num);
+    } else {
+        /* jump to another page: currently not optimized */
+        tcg_gen_movi_i64(psw_addr, pc);
+        tcg_gen_exit_tb(0);
+    }
+}
+
+/* Branch-inlining statistics; both are no-ops unless
+   DEBUG_INLINE_BRANCHES is defined. */
+static inline void account_noninline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+    inline_branch_miss[cc_op]++;
+#endif
+}
+
+static inline void account_inline_branch(DisasContext *s)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+    inline_branch_hit[s->cc_op]++;
+#endif
+}
+
+static void gen_jcc(DisasContext *s, uint32_t mask, int skip)
+{
+ TCGv_i32 tmp, tmp2, r;
+ TCGv_i64 tmp64;
+ int old_cc_op;
+
+ switch (s->cc_op) {
+ case CC_OP_LTGT0_32:
+ tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, cc_dst);
+ switch (mask) {
+ case 0x8 | 0x4: /* dst <= 0 */
+ tcg_gen_brcondi_i32(TCG_COND_GT, tmp, 0, skip);
+ break;
+ case 0x8 | 0x2: /* dst >= 0 */
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, skip);
+ break;
+ case 0x8: /* dst == 0 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip);
+ break;
+ case 0x7: /* dst != 0 */
+ case 0x6: /* dst != 0 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip);
+ break;
+ case 0x4: /* dst < 0 */
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, skip);
+ break;
+ case 0x2: /* dst > 0 */
+ tcg_gen_brcondi_i32(TCG_COND_LE, tmp, 0, skip);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ tcg_temp_free_i32(tmp);
+ break;
+ case CC_OP_LTGT0_64:
+ switch (mask) {
+ case 0x8 | 0x4: /* dst <= 0 */
+ tcg_gen_brcondi_i64(TCG_COND_GT, cc_dst, 0, skip);
+ break;
+ case 0x8 | 0x2: /* dst >= 0 */
+ tcg_gen_brcondi_i64(TCG_COND_LT, cc_dst, 0, skip);
+ break;
+ case 0x8: /* dst == 0 */
+ tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip);
+ break;
+ case 0x7: /* dst != 0 */
+ case 0x6: /* dst != 0 */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip);
+ break;
+ case 0x4: /* dst < 0 */
+ tcg_gen_brcondi_i64(TCG_COND_GE, cc_dst, 0, skip);
+ break;
+ case 0x2: /* dst > 0 */
+ tcg_gen_brcondi_i64(TCG_COND_LE, cc_dst, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_LTGT_32:
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, cc_src);
+ tcg_gen_trunc_i64_i32(tmp2, cc_dst);
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i32(TCG_COND_GT, tmp, tmp2, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i32(TCG_COND_LT, tmp, tmp2, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i32(TCG_COND_GE, tmp, tmp2, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i32(TCG_COND_LE, tmp, tmp2, skip);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case CC_OP_LTGT_64:
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i64(TCG_COND_GT, cc_src, cc_dst, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i64(TCG_COND_LT, cc_src, cc_dst, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i64(TCG_COND_NE, cc_src, cc_dst, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i64(TCG_COND_EQ, cc_src, cc_dst, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i64(TCG_COND_GE, cc_src, cc_dst, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i64(TCG_COND_LE, cc_src, cc_dst, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_LTUGTU_32:
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, cc_src);
+ tcg_gen_trunc_i64_i32(tmp2, cc_dst);
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i32(TCG_COND_GTU, tmp, tmp2, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i32(TCG_COND_LTU, tmp, tmp2, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i32(TCG_COND_GEU, tmp, tmp2, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i32(TCG_COND_LEU, tmp, tmp2, skip);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case CC_OP_LTUGTU_64:
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i64(TCG_COND_GTU, cc_src, cc_dst, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i64(TCG_COND_LTU, cc_src, cc_dst, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i64(TCG_COND_NE, cc_src, cc_dst, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i64(TCG_COND_EQ, cc_src, cc_dst, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i64(TCG_COND_GEU, cc_src, cc_dst, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i64(TCG_COND_LEU, cc_src, cc_dst, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_NZ:
+ switch (mask) {
+ /* dst == 0 || dst != 0 */
+ case 0x8 | 0x4:
+ case 0x8 | 0x4 | 0x2:
+ case 0x8 | 0x4 | 0x2 | 0x1:
+ case 0x8 | 0x4 | 0x1:
+ break;
+ /* dst == 0 */
+ case 0x8:
+ case 0x8 | 0x2:
+ case 0x8 | 0x2 | 0x1:
+ case 0x8 | 0x1:
+ tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip);
+ break;
+ /* dst != 0 */
+ case 0x4:
+ case 0x4 | 0x2:
+ case 0x4 | 0x2 | 0x1:
+ case 0x4 | 0x1:
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_TM_32:
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_i64_i32(tmp, cc_src);
+ tcg_gen_trunc_i64_i32(tmp2, cc_dst);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ switch (mask) {
+ case 0x8: /* val & mask == 0 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* val & mask != 0 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ tcg_temp_free_i32(tmp);
+ account_inline_branch(s);
+ break;
+ case CC_OP_TM_64:
+ tmp64 = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(tmp64, cc_src, cc_dst);
+ switch (mask) {
+ case 0x8: /* val & mask == 0 */
+ tcg_gen_brcondi_i64(TCG_COND_NE, tmp64, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* val & mask != 0 */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, tmp64, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ tcg_temp_free_i64(tmp64);
+ account_inline_branch(s);
+ break;
+ case CC_OP_ICM:
+ switch (mask) {
+ case 0x8: /* val == 0 */
+ tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* val != 0 */
+ case 0x4 | 0x2: /* val != 0 */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_STATIC:
+ old_cc_op = s->cc_op;
+ goto do_dynamic_nocccalc;
+ case CC_OP_DYNAMIC:
+ default:
+do_dynamic:
+ old_cc_op = s->cc_op;
+ /* calculate cc value */
+ gen_op_calc_cc(s);
+
+do_dynamic_nocccalc:
+ /* jump based on cc */
+ account_noninline_branch(s, old_cc_op);
+
+ switch (mask) {
+ case 0x8 | 0x4 | 0x2 | 0x1:
+ /* always true */
+ break;
+ case 0x8 | 0x4 | 0x2: /* cc != 3 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 3, skip);
+ break;
+ case 0x8 | 0x4 | 0x1: /* cc != 2 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 2, skip);
+ break;
+ case 0x8 | 0x2 | 0x1: /* cc != 1 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 1, skip);
+ break;
+ case 0x8 | 0x2: /* cc == 0 || cc == 2 */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cc_op, 1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x8 | 0x4: /* cc < 2 */
+ tcg_gen_brcondi_i32(TCG_COND_GEU, cc_op, 2, skip);
+ break;
+ case 0x8: /* cc == 0 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* cc != 0 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 0, skip);
+ break;
+ case 0x4 | 0x1: /* cc == 1 || cc == 3 */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cc_op, 1);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x4: /* cc == 1 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 1, skip);
+ break;
+ case 0x2 | 0x1: /* cc > 1 */
+ tcg_gen_brcondi_i32(TCG_COND_LEU, cc_op, 1, skip);
+ break;
+ case 0x2: /* cc == 2 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 2, skip);
+ break;
+ case 0x1: /* cc == 3 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 3, skip);
+ break;
+ default: /* cc is masked by something else */
+ tmp = tcg_const_i32(3);
+ /* 3 - cc */
+ tcg_gen_sub_i32(tmp, tmp, cc_op);
+ tmp2 = tcg_const_i32(1);
+ /* 1 << (3 - cc) */
+ tcg_gen_shl_i32(tmp2, tmp2, tmp);
+ r = tcg_const_i32(mask);
+ /* mask & (1 << (3 - cc)) */
+ tcg_gen_and_i32(r, r, tmp2);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+
+ tcg_gen_brcondi_i32(TCG_COND_EQ, r, 0, skip);
+ tcg_temp_free_i32(r);
+ break;
+ }
+ break;
+ }
+}
+
+/*
+ * BCR-style branch: conditionally load the PSW address from a register
+ * target.  mask is the 4-bit cc mask; offset is the address of the insn
+ * (the fall-through continues at offset + 2).
+ */
+static void gen_bcr(DisasContext *s, uint32_t mask, TCGv_i64 target,
+                    uint64_t offset)
+{
+    int skip;
+
+    if (mask == 0xf) {
+        /* unconditional */
+        tcg_gen_mov_i64(psw_addr, target);
+        tcg_gen_exit_tb(0);
+    } else if (mask == 0) {
+        /* ignore cc and never match */
+        gen_goto_tb(s, 0, offset + 2);
+    } else {
+        /* Local temp: it must survive the brcond emitted by gen_jcc.  */
+        TCGv_i64 new_addr = tcg_temp_local_new_i64();
+
+        tcg_gen_mov_i64(new_addr, target);
+        skip = gen_new_label();
+        gen_jcc(s, mask, skip);
+        tcg_gen_mov_i64(psw_addr, new_addr);
+        tcg_gen_exit_tb(0);
+        gen_set_label(skip);
+        /* Free exactly once, after the last reference; the previous code
+           freed new_addr on both paths, i.e. twice at translation time.  */
+        tcg_temp_free_i64(new_addr);
+        gen_goto_tb(s, 1, offset + 2);
+    }
+}
+
+/*
+ * BRC-style relative branch: branch to s->pc + offset when the cc
+ * matches mask, otherwise fall through to the next insn (s->pc + 4).
+ */
+static void gen_brc(uint32_t mask, DisasContext *s, int32_t offset)
+{
+    int lab_not_taken;
+
+    if (mask == 0) {
+        /* empty cc mask: the branch can never be taken */
+        gen_goto_tb(s, 0, s->pc + 4);
+    } else if (mask == 0xf) {
+        /* full cc mask: the branch is always taken */
+        gen_goto_tb(s, 0, s->pc + offset);
+    } else {
+        /* conditional: gen_jcc jumps to the label when cc does NOT match */
+        lab_not_taken = gen_new_label();
+        gen_jcc(s, mask, lab_not_taken);
+        gen_goto_tb(s, 0, s->pc + offset);
+        gen_set_label(lab_not_taken);
+        gen_goto_tb(s, 1, s->pc + 4);
+    }
+    s->is_jmp = DISAS_TB_JUMP;
+}
+
+/*
+ * MVC - move (copy) l+1 bytes from address s2 to address s1.
+ * Short, known-good lengths are expanded inline; everything else goes
+ * through the mvc helper.  The architected memset idiom
+ * (dest == src + 1) is detected at runtime and expanded separately.
+ */
+static void gen_op_mvc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
+{
+    TCGv_i64 tmp, tmp2;
+    int i;
+    int l_memset;
+    int l_out;
+    TCGv_i64 dest, src;
+    TCGv_i32 vl;
+
+    /* Find out if we should use the inline version of mvc */
+    switch (l) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+    case 7:
+    case 11:
+    case 15:
+        /* use inline */
+        break;
+    default:
+        /* Fall back to helper */
+        vl = tcg_const_i32(l);
+        potential_page_fault(s);
+        gen_helper_mvc(vl, s1, s2);
+        tcg_temp_free_i32(vl);
+        return;
+    }
+
+    /* Allocate labels and temps only once we know we stay inline; they
+       used to be allocated up front and leaked on the helper path above. */
+    l_memset = gen_new_label();
+    l_out = gen_new_label();
+    dest = tcg_temp_local_new_i64();
+    src = tcg_temp_local_new_i64();
+
+    tcg_gen_mov_i64(dest, s1);
+    tcg_gen_mov_i64(src, s2);
+
+    if (!(s->tb->flags & FLAG_MASK_64)) {
+        /* XXX what if we overflow while moving? */
+        tcg_gen_andi_i64(dest, dest, 0x7fffffffUL);
+        tcg_gen_andi_i64(src, src, 0x7fffffffUL);
+    }
+
+    /* dest == src + 1 is the architected memset idiom */
+    tmp = tcg_temp_new_i64();
+    tcg_gen_addi_i64(tmp, src, 1);
+    tcg_gen_brcond_i64(TCG_COND_EQ, dest, tmp, l_memset);
+    tcg_temp_free_i64(tmp);
+
+    switch (l) {
+    case 0:
+        /* 1 byte */
+        tmp = tcg_temp_new_i64();
+
+        tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
+        tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
+
+        tcg_temp_free_i64(tmp);
+        break;
+    case 1:
+        /* 2 bytes */
+        tmp = tcg_temp_new_i64();
+
+        tcg_gen_qemu_ld16u(tmp, src, get_mem_index(s));
+        tcg_gen_qemu_st16(tmp, dest, get_mem_index(s));
+
+        tcg_temp_free_i64(tmp);
+        break;
+    case 3:
+        /* 4 bytes */
+        tmp = tcg_temp_new_i64();
+
+        tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
+        tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
+
+        tcg_temp_free_i64(tmp);
+        break;
+    case 4:
+        /* 5 bytes: one word plus one byte */
+        tmp = tcg_temp_new_i64();
+        tmp2 = tcg_temp_new_i64();
+
+        tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
+        tcg_gen_addi_i64(src, src, 4);
+        tcg_gen_qemu_ld8u(tmp2, src, get_mem_index(s));
+        tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
+        tcg_gen_addi_i64(dest, dest, 4);
+        tcg_gen_qemu_st8(tmp2, dest, get_mem_index(s));
+
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 7:
+        /* 8 bytes */
+        tmp = tcg_temp_new_i64();
+
+        tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
+        tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
+
+        tcg_temp_free_i64(tmp);
+        break;
+    default:
+        /* The inline version can become too big for too uneven numbers, only
+           use it on known good lengths (2, 5, 6, 11, 15 land here) */
+        tmp = tcg_temp_new_i64();
+        for (i = 0; (i + 7) <= l; i += 8) {
+            tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
+            tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
+
+            tcg_gen_addi_i64(src, src, 8);
+            tcg_gen_addi_i64(dest, dest, 8);
+        }
+
+        for (; i <= l; i++) {
+            tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
+            tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
+
+            tcg_gen_addi_i64(src, src, 1);
+            tcg_gen_addi_i64(dest, dest, 1);
+        }
+
+        tcg_temp_free_i64(tmp);
+        break;
+    }
+
+    tcg_gen_br(l_out);
+
+    gen_set_label(l_memset);
+    /* memset case (dest == (src + 1)) */
+
+    tmp = tcg_temp_new_i64();
+    tmp2 = tcg_temp_new_i64();
+    /* fill tmp with the byte, replicated across all 8 byte lanes */
+    tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
+    tcg_gen_shli_i64(tmp2, tmp, 8);
+    tcg_gen_or_i64(tmp, tmp, tmp2);
+    tcg_gen_shli_i64(tmp2, tmp, 16);
+    tcg_gen_or_i64(tmp, tmp, tmp2);
+    tcg_gen_shli_i64(tmp2, tmp, 32);
+    tcg_gen_or_i64(tmp, tmp, tmp2);
+    tcg_temp_free_i64(tmp2);
+
+    for (i = 0; (i + 7) <= l; i += 8) {
+        tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
+        tcg_gen_addi_i64(dest, dest, 8);
+    }
+
+    for (; i <= l; i++) {
+        tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
+        tcg_gen_addi_i64(dest, dest, 1);
+    }
+
+    tcg_temp_free_i64(tmp);
+
+    gen_set_label(l_out);
+
+    tcg_temp_free_i64(dest);
+    tcg_temp_free_i64(src);
+}
+
+/*
+ * CLC - compare logical (unsigned) l+1 bytes at s1 with l+1 bytes at s2.
+ * Power-of-two operand sizes are compared inline with a single load each;
+ * all other lengths go through the clc helper.
+ */
+static void gen_op_clc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
+{
+    TCGv_i64 left, right;
+    TCGv_i32 vl;
+
+    /* check for simple 8/16/32/64-bit matches first */
+    switch (l) {
+    case 0:
+    case 1:
+    case 3:
+    case 7:
+        left = tcg_temp_new_i64();
+        right = tcg_temp_new_i64();
+
+        switch (l) {
+        case 0:
+            tcg_gen_qemu_ld8u(left, s1, get_mem_index(s));
+            tcg_gen_qemu_ld8u(right, s2, get_mem_index(s));
+            break;
+        case 1:
+            tcg_gen_qemu_ld16u(left, s1, get_mem_index(s));
+            tcg_gen_qemu_ld16u(right, s2, get_mem_index(s));
+            break;
+        case 3:
+            tcg_gen_qemu_ld32u(left, s1, get_mem_index(s));
+            tcg_gen_qemu_ld32u(right, s2, get_mem_index(s));
+            break;
+        default:
+            tcg_gen_qemu_ld64(left, s1, get_mem_index(s));
+            tcg_gen_qemu_ld64(right, s2, get_mem_index(s));
+            break;
+        }
+        cmp_u64(s, left, right);
+
+        tcg_temp_free_i64(left);
+        tcg_temp_free_i64(right);
+        return;
+    }
+
+    /* odd length: defer to the helper, which also computes the cc */
+    potential_page_fault(s);
+    vl = tcg_const_i32(l);
+    gen_helper_clc(cc_op, vl, s1, s2);
+    tcg_temp_free_i32(vl);
+    set_cc_static(s);
+}
+
+/*
+ * Disassemble one instruction from the 0xe3 opcode group (RXY format).
+ * op is the low opcode byte; the single storage operand address
+ * d2(x2,b2) is materialized once into 'addr' and consumed by each case.
+ */
+static void disas_e3(DisasContext* s, int op, int r1, int x2, int b2, int d2)
+{
+    TCGv_i64 addr, tmp, tmp2, tmp3, tmp4;
+    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+
+    LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
+              op, r1, x2, b2, d2);
+    addr = get_address(s, x2, b2, d2);
+    switch (op) {
+    case 0x2: /* LTG R1,D2(X2,B2) [RXY] */
+    case 0x4: /* lg r1,d2(x2,b2) */
+        tcg_gen_qemu_ld64(regs[r1], addr, get_mem_index(s));
+        if (op == 0x2) {
+            /* LTG additionally sets the cc from the loaded value */
+            set_cc_s64(s, regs[r1]);
+        }
+        break;
+    case 0x12: /* LT R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+        store_reg32(r1, tmp32_1);
+        set_cc_s32(s, tmp32_1);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0xc: /* MSG R1,D2(X2,B2) [RXY] */
+    case 0x1c: /* MSGF R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        if (op == 0xc) {
+            tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        } else {
+            /* MSGF multiplies by a sign-extended 32-bit operand */
+            tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+        }
+        tcg_gen_mul_i64(regs[r1], regs[r1], tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0xd: /* DSG R1,D2(X2,B2) [RXY] */
+    case 0x1d: /* DSGF R1,D2(X2,B2) [RXY] */
+        /* dividend in r1+1; quotient -> r1+1, remainder -> r1.
+           NOTE(review): there is no fixed-point-divide exception check
+           here; a zero divisor reaches tcg_gen_div_i64 directly -
+           confirm how that is meant to be handled. */
+        tmp2 = tcg_temp_new_i64();
+        if (op == 0x1d) {
+            tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+        } else {
+            tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        }
+        tmp4 = load_reg(r1 + 1);
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_div_i64(tmp3, tmp4, tmp2);
+        store_reg(r1 + 1, tmp3);
+        tcg_gen_rem_i64(tmp3, tmp4, tmp2);
+        store_reg(r1, tmp3);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        tcg_temp_free_i64(tmp4);
+        break;
+    case 0x8: /* AG R1,D2(X2,B2) [RXY] */
+    case 0xa: /* ALG R1,D2(X2,B2) [RXY] */
+    case 0x18: /* AGF R1,D2(X2,B2) [RXY] */
+    case 0x1a: /* ALGF R1,D2(X2,B2) [RXY] */
+        /* operand width/extension depends on the opcode variant */
+        if (op == 0x1a) {
+            tmp2 = tcg_temp_new_i64();
+            tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        } else if (op == 0x18) {
+            tmp2 = tcg_temp_new_i64();
+            tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+        } else {
+            tmp2 = tcg_temp_new_i64();
+            tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        }
+        tmp4 = load_reg(r1);
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_add_i64(tmp3, tmp4, tmp2);
+        store_reg(r1, tmp3);
+        switch (op) {
+        case 0x8:
+        case 0x18:
+            /* signed add sets cc from overflow/sign */
+            set_cc_add64(s, tmp4, tmp2, tmp3);
+            break;
+        case 0xa:
+        case 0x1a:
+            /* logical add sets cc from carry */
+            set_cc_addu64(s, tmp4, tmp2, tmp3);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        tcg_temp_free_i64(tmp4);
+        break;
+    case 0x9: /* SG R1,D2(X2,B2) [RXY] */
+    case 0xb: /* SLG R1,D2(X2,B2) [RXY] */
+    case 0x19: /* SGF R1,D2(X2,B2) [RXY] */
+    case 0x1b: /* SLGF R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        if (op == 0x19) {
+            tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+        } else if (op == 0x1b) {
+            tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        } else {
+            tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        }
+        tmp4 = load_reg(r1);
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_sub_i64(tmp3, tmp4, tmp2);
+        store_reg(r1, tmp3);
+        switch (op) {
+        case 0x9:
+        case 0x19:
+            set_cc_sub64(s, tmp4, tmp2, tmp3);
+            break;
+        case 0xb:
+        case 0x1b:
+            set_cc_subu64(s, tmp4, tmp2, tmp3);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        tcg_temp_free_i64(tmp4);
+        break;
+    case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
+        /* load 64-bit byte-reversed */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        tcg_gen_bswap64_i64(tmp2, tmp2);
+        store_reg(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x14: /* LGF R1,D2(X2,B2) [RXY] */
+    case 0x16: /* LLGF R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        if (op == 0x14) {
+            /* LGF sign-extends, LLGF zero-extends */
+            tcg_gen_ext32s_i64(tmp2, tmp2);
+        }
+        store_reg(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x15: /* LGH R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld16s(tmp2, addr, get_mem_index(s));
+        store_reg(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
+        /* load 31 bits: bit 32 of the operand is cleared */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
+        store_reg(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
+        store_reg32(r1, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
+        store_reg16(r1, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x20: /* CG R1,D2(X2,B2) [RXY] */
+    case 0x21: /* CLG R1,D2(X2,B2) */
+    case 0x30: /* CGF R1,D2(X2,B2) [RXY] */
+    case 0x31: /* CLGF R1,D2(X2,B2) [RXY] */
+        /* first switch picks the operand load, second the compare kind */
+        tmp2 = tcg_temp_new_i64();
+        switch (op) {
+        case 0x20:
+        case 0x21:
+            tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+            break;
+        case 0x30:
+            tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+            break;
+        case 0x31:
+            tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+            break;
+        default:
+            tcg_abort();
+        }
+        switch (op) {
+        case 0x20:
+        case 0x30:
+            cmp_s64(s, regs[r1], tmp2);
+            break;
+        case 0x21:
+        case 0x31:
+            cmp_u64(s, regs[r1], tmp2);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x24: /* stg r1, d2(x2,b2) */
+        tcg_gen_qemu_st64(regs[r1], addr, get_mem_index(s));
+        break;
+    case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
+        /* store 32-bit byte-reversed */
+        tmp32_1 = load_reg32(r1);
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
+        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x50: /* STY R1,D2(X2,B2) [RXY] */
+        tmp32_1 = load_reg32(r1);
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x57: /* XY R1,D2(X2,B2) [RXY] */
+        tmp32_1 = load_reg32(r1);
+        tmp32_2 = tcg_temp_new_i32();
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+        tcg_temp_free_i64(tmp2);
+        tcg_gen_xor_i32(tmp32_2, tmp32_1, tmp32_2);
+        store_reg32(r1, tmp32_2);
+        set_cc_nz_u32(s, tmp32_2);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x58: /* LY R1,D2(X2,B2) [RXY] */
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld32u(tmp3, addr, get_mem_index(s));
+        store_reg32_i64(r1, tmp3);
+        tcg_temp_free_i64(tmp3);
+        break;
+    case 0x5a: /* AY R1,D2(X2,B2) [RXY] */
+    case 0x5b: /* SY R1,D2(X2,B2) [RXY] */
+        tmp32_1 = load_reg32(r1);
+        tmp32_2 = tcg_temp_new_i32();
+        tmp32_3 = tcg_temp_new_i32();
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+        tcg_temp_free_i64(tmp2);
+        switch (op) {
+        case 0x5a:
+            tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+            break;
+        case 0x5b:
+            tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+            break;
+        default:
+            tcg_abort();
+        }
+        store_reg32(r1, tmp32_3);
+        switch (op) {
+        case 0x5a:
+            set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+            break;
+        case 0x5b:
+            set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0x71: /* LAY R1,D2(X2,B2) [RXY] */
+        /* load address: the computed address itself is the result */
+        store_reg(r1, addr);
+        break;
+    case 0x72: /* STCY R1,D2(X2,B2) [RXY] */
+        tmp32_1 = load_reg32(r1);
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+        tcg_gen_qemu_st8(tmp2, addr, get_mem_index(s));
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x73: /* ICY R1,D2(X2,B2) [RXY] */
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld8u(tmp3, addr, get_mem_index(s));
+        store_reg8(r1, tmp3);
+        tcg_temp_free_i64(tmp3);
+        break;
+    case 0x76: /* LB R1,D2(X2,B2) [RXY] */
+    case 0x77: /* LGB R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld8s(tmp2, addr, get_mem_index(s));
+        switch (op) {
+        case 0x76:
+            tcg_gen_ext8s_i64(tmp2, tmp2);
+            store_reg32_i64(r1, tmp2);
+            break;
+        case 0x77:
+            tcg_gen_ext8s_i64(tmp2, tmp2);
+            store_reg(r1, tmp2);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x78: /* LHY R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld16s(tmp2, addr, get_mem_index(s));
+        store_reg32_i64(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x80: /* NG R1,D2(X2,B2) [RXY] */
+    case 0x81: /* OG R1,D2(X2,B2) [RXY] */
+    case 0x82: /* XG R1,D2(X2,B2) [RXY] */
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld64(tmp3, addr, get_mem_index(s));
+        switch (op) {
+        case 0x80:
+            tcg_gen_and_i64(regs[r1], regs[r1], tmp3);
+            break;
+        case 0x81:
+            tcg_gen_or_i64(regs[r1], regs[r1], tmp3);
+            break;
+        case 0x82:
+            tcg_gen_xor_i64(regs[r1], regs[r1], tmp3);
+            break;
+        default:
+            tcg_abort();
+        }
+        set_cc_nz_u64(s, regs[r1]);
+        tcg_temp_free_i64(tmp3);
+        break;
+    case 0x86: /* MLG R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_const_i32(r1);
+        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        gen_helper_mlg(tmp32_1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x87: /* DLG R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_const_i32(r1);
+        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        gen_helper_dlg(tmp32_1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x88: /* ALCG R1,D2(X2,B2) [RXY] */
+        /* add with carry: result = r1 + mem + carry-bit-of-cc */
+        tmp2 = tcg_temp_new_i64();
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        /* XXX possible optimization point */
+        gen_op_calc_cc(s);
+        tcg_gen_extu_i32_i64(tmp3, cc_op);
+        tcg_gen_shri_i64(tmp3, tmp3, 1);
+        tcg_gen_andi_i64(tmp3, tmp3, 1);
+        tcg_gen_add_i64(tmp3, tmp2, tmp3);
+        tcg_gen_add_i64(tmp3, regs[r1], tmp3);
+        store_reg(r1, tmp3);
+        /* NOTE(review): store_reg above has already overwritten regs[r1]
+           with the result, so this cc computation sees the result (not
+           the original first operand) as src - confirm intent. */
+        set_cc_addu64(s, regs[r1], tmp2, tmp3);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        break;
+    case 0x89: /* SLBG R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_const_i32(r1);
+        tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+        /* XXX possible optimization point */
+        gen_op_calc_cc(s);
+        gen_helper_slbg(cc_op, cc_op, tmp32_1, regs[r1], tmp2);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x90: /* LLGC R1,D2(X2,B2) [RXY] */
+        tcg_gen_qemu_ld8u(regs[r1], addr, get_mem_index(s));
+        break;
+    case 0x91: /* LLGH R1,D2(X2,B2) [RXY] */
+        tcg_gen_qemu_ld16u(regs[r1], addr, get_mem_index(s));
+        break;
+    case 0x94: /* LLC R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld8u(tmp2, addr, get_mem_index(s));
+        store_reg32_i64(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x95: /* LLH R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
+        store_reg32_i64(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x96: /* ML R1,D2(X2,B2) [RXY] */
+        /* 32x32 -> 64 unsigned multiply into the r1/r1+1 pair */
+        tmp2 = tcg_temp_new_i64();
+        tmp3 = load_reg((r1 + 1) & 15);
+        tcg_gen_ext32u_i64(tmp3, tmp3);
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+        store_reg32_i64((r1 + 1) & 15, tmp2);
+        tcg_gen_shri_i64(tmp2, tmp2, 32);
+        store_reg32_i64(r1, tmp2);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        break;
+    case 0x97: /* DL R1,D2(X2,B2) [RXY] */
+        /* reg(r1) = reg(r1, r1+1) % ld32(addr) */
+        /* reg(r1+1) = reg(r1, r1+1) / ld32(addr) */
+        tmp = load_reg(r1);
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        tmp3 = load_reg((r1 + 1) & 15);
+        tcg_gen_ext32u_i64(tmp2, tmp2);
+        tcg_gen_ext32u_i64(tmp3, tmp3);
+        /* build the 64-bit dividend from the register pair */
+        tcg_gen_shli_i64(tmp, tmp, 32);
+        tcg_gen_or_i64(tmp, tmp, tmp3);
+
+        tcg_gen_rem_i64(tmp3, tmp, tmp2);
+        tcg_gen_div_i64(tmp, tmp, tmp2);
+        store_reg32_i64((r1 + 1) & 15, tmp);
+        store_reg32_i64(r1, tmp3);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        break;
+    case 0x98: /* ALC R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = load_reg32(r1);
+        tmp32_2 = tcg_temp_new_i32();
+        tmp32_3 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+        /* XXX possible optimization point */
+        gen_op_calc_cc(s);
+        gen_helper_addc_u32(tmp32_3, cc_op, tmp32_1, tmp32_2);
+        set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+        store_reg32(r1, tmp32_3);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0x99: /* SLB R1,D2(X2,B2) [RXY] */
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+        /* XXX possible optimization point */
+        gen_op_calc_cc(s);
+        gen_helper_slb(cc_op, cc_op, tmp32_1, tmp32_2);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    default:
+        LOG_DISAS("illegal e3 operation 0x%x\n", op);
+        gen_illegal_opcode(s, 3);
+        break;
+    }
+    tcg_temp_free_i64(addr);
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * Disassemble one instruction from the 0xe5 group (SSE format:
+ * two base+displacement storage operands).
+ */
+static void disas_e5(DisasContext* s, uint64_t insn)
+{
+    TCGv_i64 addr1, addr2;
+    int op = (insn >> 32) & 0xff;
+
+    addr1 = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
+    addr2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);
+
+    LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
+    switch (op) {
+    case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
+        /* Test Protection */
+        potential_page_fault(s);
+        gen_helper_tprot(cc_op, addr1, addr2);
+        set_cc_static(s);
+        break;
+    default:
+        LOG_DISAS("illegal e5 operation 0x%x\n", op);
+        gen_illegal_opcode(s, 3);
+        break;
+    }
+
+    tcg_temp_free_i64(addr1);
+    tcg_temp_free_i64(addr2);
+}
+#endif
+
+/*
+ * Disassemble one instruction from the 0xeb group (mostly RSY/SIY
+ * format).  op is the low opcode byte; r1/r3 carry the register (or,
+ * for SIY, the immediate nibbles) and d2(b2) the storage operand.
+ */
+static void disas_eb(DisasContext *s, int op, int r1, int r3, int b2, int d2)
+{
+    TCGv_i64 tmp, tmp2, tmp3;
+    TCGv_i32 tmp32_1, tmp32_2;
+    int i, stm_len;
+    int ilc = 3;    /* instruction length code used for exceptions */
+
+    LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
+              op, r1, r3, b2, d2);
+    switch (op) {
+    case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
+    case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
+    case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
+    case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
+        /* 64-bit shifts/rotate: amount is d2(b2) modulo 64 */
+        if (b2) {
+            tmp = get_address(s, 0, b2, d2);
+            tcg_gen_andi_i64(tmp, tmp, 0x3f);
+        } else {
+            tmp = tcg_const_i64(d2 & 0x3f);
+        }
+        switch (op) {
+        case 0xc:
+            tcg_gen_shr_i64(regs[r1], regs[r3], tmp);
+            break;
+        case 0xd:
+            tcg_gen_shl_i64(regs[r1], regs[r3], tmp);
+            break;
+        case 0xa:
+            tcg_gen_sar_i64(regs[r1], regs[r3], tmp);
+            break;
+        case 0x1c:
+            tcg_gen_rotl_i64(regs[r1], regs[r3], tmp);
+            break;
+        default:
+            tcg_abort();
+            break;
+        }
+        if (op == 0xa) {
+            /* only SRAG sets the condition code */
+            set_cc_s64(s, regs[r1]);
+        }
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
+        if (b2) {
+            tmp = get_address(s, 0, b2, d2);
+            tcg_gen_andi_i64(tmp, tmp, 0x3f);
+        } else {
+            tmp = tcg_const_i64(d2 & 0x3f);
+        }
+        tmp32_1 = tcg_temp_new_i32();
+        tmp32_2 = load_reg32(r3);
+        tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+        switch (op) {
+        case 0x1d:
+            tcg_gen_rotl_i32(tmp32_1, tmp32_2, tmp32_1);
+            break;
+        default:
+            tcg_abort();
+            break;
+        }
+        store_reg32(r1, tmp32_1);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
+    case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
+    case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
+    case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
+        /* Apparently, unrolling lmg/stmg of any size gains performance -
+           even for very long ones... */
+        tmp = get_address(s, 0, b2, d2);
+        /* registers wrap from 15 back to 0 until r3 is reached */
+        for (i = r1;; i = (i + 1) % 16) {
+            switch (op) {
+            case 0x4:
+                tcg_gen_qemu_ld64(regs[i], tmp, get_mem_index(s));
+                stm_len = 8;
+                break;
+            case 0x96:
+                /* LMH: replace only the high word of the register */
+                tmp2 = tcg_temp_new_i64();
+                tmp3 = load_reg(i);
+                tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+                tcg_gen_shli_i64(tmp2, tmp2, 32);
+                tcg_gen_ext32u_i64(tmp3, tmp3);
+                tcg_gen_or_i64(tmp2, tmp2, tmp3);
+                store_reg(i, tmp2);
+                tcg_temp_free_i64(tmp2);
+                tcg_temp_free_i64(tmp3);
+                stm_len = 4;
+                break;
+            case 0x24:
+                tcg_gen_qemu_st64(regs[i], tmp, get_mem_index(s));
+                stm_len = 8;
+                break;
+            case 0x26:
+                /* STMH: store only the high word of the register */
+                tmp3 = tcg_temp_new_i64();
+                tcg_gen_shri_i64(tmp3, regs[i], 32);
+                tcg_gen_qemu_st32(tmp3, tmp, get_mem_index(s));
+                /* fix: tmp3 was previously leaked once per register */
+                tcg_temp_free_i64(tmp3);
+                stm_len = 4;
+                break;
+            default:
+                tcg_abort();
+            }
+            tcg_gen_addi_i64(tmp, tmp, stm_len);
+            if (i == r3) {
+                break;
+            }
+        }
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
+        tmp = get_address(s, 0, b2, d2);
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r3);
+        potential_page_fault(s);
+        gen_helper_stcmh(tmp32_1, tmp, tmp32_2);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+#ifndef CONFIG_USER_ONLY
+    case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
+        /* Load Control */
+        check_privileged(s, ilc);
+        tmp = get_address(s, 0, b2, d2);
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r3);
+        potential_page_fault(s);
+        gen_helper_lctlg(tmp32_1, tmp, tmp32_2);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
+        /* Store Control */
+        check_privileged(s, ilc);
+        tmp = get_address(s, 0, b2, d2);
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r3);
+        potential_page_fault(s);
+        gen_helper_stctg(tmp32_1, tmp, tmp32_2);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+#endif
+    case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
+        tmp = get_address(s, 0, b2, d2);
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r3);
+        potential_page_fault(s);
+        /* XXX rewrite in tcg */
+        gen_helper_csg(cc_op, tmp32_1, tmp, tmp32_2);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
+        tmp = get_address(s, 0, b2, d2);
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r3);
+        potential_page_fault(s);
+        /* XXX rewrite in tcg */
+        gen_helper_cdsg(cc_op, tmp32_1, tmp, tmp32_2);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x51: /* TMY D1(B1),I2 [SIY] */
+        tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */
+        tmp2 = tcg_const_i64((r1 << 4) | r3);
+        tcg_gen_qemu_ld8u(tmp, tmp, get_mem_index(s));
+        /* yes, this is a 32 bit operation with 64 bit tcg registers, because
+           that incurs less conversions */
+        cmp_64(s, tmp, tmp2, CC_OP_TM_32);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x52: /* MVIY D1(B1),I2 [SIY] */
+        tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */
+        tmp2 = tcg_const_i64((r1 << 4) | r3);
+        tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x55: /* CLIY D1(B1),I2 [SIY] */
+        tmp3 = get_address(s, 0, b2, d2); /* SIY -> this is the 1st operand */
+        tmp = tcg_temp_new_i64();
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld8u(tmp, tmp3, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+        cmp_u32c(s, tmp32_1, (r1 << 4) | r3);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp3);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x80: /* ICMH R1,M3,D2(B2) [RSY] */
+        tmp = get_address(s, 0, b2, d2);
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r3);
+        potential_page_fault(s);
+        /* XXX split CC calculation out */
+        gen_helper_icmh(cc_op, tmp32_1, tmp, tmp32_2);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    default:
+        LOG_DISAS("illegal eb operation 0x%x\n", op);
+        gen_illegal_opcode(s, ilc);
+        break;
+    }
+}
+
+/* Disassemble one 0xed-prefixed instruction (RXE/RXF BFP memory ops).
+ *
+ * op is the low opcode byte; r1/x2/b2/d2 are the RXE fields and r1b
+ * carries the extra register of RXF-format insns.  The effective
+ * address (addr) and an i32 holding r1 (tmp_r1) are allocated up
+ * front and released through the common tail after the switch, so
+ * every arm - including the illegal-opcode arm - must reach it.
+ */
+static void disas_ed(DisasContext *s, int op, int r1, int x2, int b2, int d2,
+                     int r1b)
+{
+    TCGv_i32 tmp_r1, tmp32;
+    TCGv_i64 addr, tmp;
+    addr = get_address(s, x2, b2, d2);
+    tmp_r1 = tcg_const_i32(r1);
+    switch (op) {
+    case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_lxdb(tmp_r1, addr);
+        break;
+    case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
+        tmp = tcg_temp_new_i64();
+        tmp32 = load_freg32(r1);
+        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+        set_cc_cmp_f32_i64(s, tmp32, tmp);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
+        tmp = tcg_temp_new_i64();
+        tmp32 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32, tmp);
+        gen_helper_aeb(tmp_r1, tmp32);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32);
+
+        /* CC is derived from the result freg, reloaded after the helper */
+        tmp32 = load_freg32(r1);
+        set_cc_nz_f32(s, tmp32);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
+        tmp = tcg_temp_new_i64();
+        tmp32 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32, tmp);
+        gen_helper_seb(tmp_r1, tmp32);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32);
+
+        /* CC is derived from the result freg, reloaded after the helper */
+        tmp32 = load_freg32(r1);
+        set_cc_nz_f32(s, tmp32);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
+        tmp = tcg_temp_new_i64();
+        tmp32 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32, tmp);
+        gen_helper_deb(tmp_r1, tmp32);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_tceb(cc_op, tmp_r1, addr);
+        set_cc_static(s);
+        break;
+    case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_tcdb(cc_op, tmp_r1, addr);
+        set_cc_static(s);
+        break;
+    case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_tcxb(cc_op, tmp_r1, addr);
+        set_cc_static(s);
+        break;
+    case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
+        tmp = tcg_temp_new_i64();
+        tmp32 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32, tmp);
+        gen_helper_meeb(tmp_r1, tmp32);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_cdb(cc_op, tmp_r1, addr);
+        set_cc_static(s);
+        break;
+    case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_adb(cc_op, tmp_r1, addr);
+        set_cc_static(s);
+        break;
+    case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_sdb(cc_op, tmp_r1, addr);
+        set_cc_static(s);
+        break;
+    case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_mdb(tmp_r1, addr);
+        break;
+    case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
+        potential_page_fault(s);
+        gen_helper_ddb(tmp_r1, addr);
+        break;
+    case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
+        /* for RXF insns, r1 is R3 and r1b is R1 */
+        tmp32 = tcg_const_i32(r1b);
+        potential_page_fault(s);
+        gen_helper_madb(tmp32, addr, tmp_r1);
+        tcg_temp_free_i32(tmp32);
+        break;
+    default:
+        LOG_DISAS("illegal ed operation 0x%x\n", op);
+        gen_illegal_opcode(s, 3);
+        /* fall through to the common cleanup below instead of
+           returning, so addr and tmp_r1 are not leaked */
+        break;
+    }
+    tcg_temp_free_i32(tmp_r1);
+    tcg_temp_free_i64(addr);
+}
+
+/* Disassemble one 0xa5-prefixed RI-format instruction: insert/and/or a
+ * 16-bit immediate into one halfword of r1 (IIxx/NIxx/OIxx) or load the
+ * immediate into a cleared register (LLIxx).
+ *
+ * Every non-default arm assigns tmp, which is released exactly once by
+ * the common tcg_temp_free_i64() after the switch; the illegal-opcode
+ * arm returns before tmp is allocated.
+ */
+static void disas_a5(DisasContext *s, int op, int r1, int i2)
+{
+    TCGv_i64 tmp, tmp2;
+    TCGv_i32 tmp32;
+    LOG_DISAS("disas_a5: op 0x%x r1 %d i2 0x%x\n", op, r1, i2);
+    switch (op) {
+    case 0x0: /* IIHH R1,I2 [RI] */
+        tmp = tcg_const_i64(i2);
+        tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 48, 16);
+        /* tmp is freed by the common cleanup below; freeing it here as
+           well would double-free the temp */
+        break;
+    case 0x1: /* IIHL R1,I2 [RI] */
+        tmp = tcg_const_i64(i2);
+        tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 32, 16);
+        break;
+    case 0x4: /* NIHH R1,I2 [RI] */
+    case 0x8: /* OIHH R1,I2 [RI] */
+        tmp = load_reg(r1);
+        tmp32 = tcg_temp_new_i32();
+        switch (op) {
+        case 0x4:
+            tmp2 = tcg_const_i64((((uint64_t)i2) << 48)
+                                 | 0x0000ffffffffffffULL);
+            tcg_gen_and_i64(tmp, tmp, tmp2);
+            break;
+        case 0x8:
+            tmp2 = tcg_const_i64(((uint64_t)i2) << 48);
+            tcg_gen_or_i64(tmp, tmp, tmp2);
+            break;
+        default:
+            tcg_abort();
+        }
+        store_reg(r1, tmp);
+        /* CC is based on the halfword that was operated on */
+        tcg_gen_shri_i64(tmp2, tmp, 48);
+        tcg_gen_trunc_i64_i32(tmp32, tmp2);
+        set_cc_nz_u32(s, tmp32);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0x5: /* NIHL R1,I2 [RI] */
+    case 0x9: /* OIHL R1,I2 [RI] */
+        tmp = load_reg(r1);
+        tmp32 = tcg_temp_new_i32();
+        switch (op) {
+        case 0x5:
+            tmp2 = tcg_const_i64((((uint64_t)i2) << 32)
+                                 | 0xffff0000ffffffffULL);
+            tcg_gen_and_i64(tmp, tmp, tmp2);
+            break;
+        case 0x9:
+            tmp2 = tcg_const_i64(((uint64_t)i2) << 32);
+            tcg_gen_or_i64(tmp, tmp, tmp2);
+            break;
+        default:
+            tcg_abort();
+        }
+        store_reg(r1, tmp);
+        tcg_gen_shri_i64(tmp2, tmp, 32);
+        tcg_gen_trunc_i64_i32(tmp32, tmp2);
+        tcg_gen_andi_i32(tmp32, tmp32, 0xffff);
+        set_cc_nz_u32(s, tmp32);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0x6: /* NILH R1,I2 [RI] */
+    case 0xa: /* OILH R1,I2 [RI] */
+        tmp = load_reg(r1);
+        tmp32 = tcg_temp_new_i32();
+        switch (op) {
+        case 0x6:
+            tmp2 = tcg_const_i64((((uint64_t)i2) << 16)
+                                 | 0xffffffff0000ffffULL);
+            tcg_gen_and_i64(tmp, tmp, tmp2);
+            break;
+        case 0xa:
+            tmp2 = tcg_const_i64(((uint64_t)i2) << 16);
+            tcg_gen_or_i64(tmp, tmp, tmp2);
+            break;
+        default:
+            tcg_abort();
+        }
+        store_reg(r1, tmp);
+        tcg_gen_shri_i64(tmp, tmp, 16);
+        tcg_gen_trunc_i64_i32(tmp32, tmp);
+        tcg_gen_andi_i32(tmp32, tmp32, 0xffff);
+        set_cc_nz_u32(s, tmp32);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0x7: /* NILL R1,I2 [RI] */
+    case 0xb: /* OILL R1,I2 [RI] */
+        tmp = load_reg(r1);
+        tmp32 = tcg_temp_new_i32();
+        switch (op) {
+        case 0x7:
+            tmp2 = tcg_const_i64(i2 | 0xffffffffffff0000ULL);
+            tcg_gen_and_i64(tmp, tmp, tmp2);
+            break;
+        case 0xb:
+            tmp2 = tcg_const_i64(i2);
+            tcg_gen_or_i64(tmp, tmp, tmp2);
+            break;
+        default:
+            tcg_abort();
+        }
+        store_reg(r1, tmp);
+        tcg_gen_trunc_i64_i32(tmp32, tmp);
+        tcg_gen_andi_i32(tmp32, tmp32, 0xffff);
+        set_cc_nz_u32(s, tmp32); /* signedness should not matter here */
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32);
+        break;
+    case 0xc: /* LLIHH R1,I2 [RI] */
+        tmp = tcg_const_i64( ((uint64_t)i2) << 48 );
+        store_reg(r1, tmp);
+        break;
+    case 0xd: /* LLIHL R1,I2 [RI] */
+        tmp = tcg_const_i64( ((uint64_t)i2) << 32 );
+        store_reg(r1, tmp);
+        break;
+    case 0xe: /* LLILH R1,I2 [RI] */
+        tmp = tcg_const_i64( ((uint64_t)i2) << 16 );
+        store_reg(r1, tmp);
+        break;
+    case 0xf: /* LLILL R1,I2 [RI] */
+        tmp = tcg_const_i64(i2);
+        store_reg(r1, tmp);
+        break;
+    default:
+        LOG_DISAS("illegal a5 operation 0x%x\n", op);
+        gen_illegal_opcode(s, 2);
+        /* tmp was never allocated on this path, so return rather than
+           fall into the free below */
+        return;
+    }
+    tcg_temp_free_i64(tmp);
+}
+
+/* Disassemble one 0xa7-prefixed RI-format instruction: test-under-mask,
+ * relative branches, and 16-bit-immediate load/add/multiply/compare.
+ * i2 is the signed 16-bit immediate (a signed halfword offset for the
+ * branch cases).  Each arm allocates and frees its own temps.
+ */
+static void disas_a7(DisasContext *s, int op, int r1, int i2)
+{
+    TCGv_i64 tmp, tmp2;
+    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+    int l1;
+
+    LOG_DISAS("disas_a7: op 0x%x r1 %d i2 0x%x\n", op, r1, i2);
+    switch (op) {
+    case 0x0: /* TMLH or TMH R1,I2 [RI] */
+    case 0x1: /* TMLL or TML R1,I2 [RI] */
+    case 0x2: /* TMHH R1,I2 [RI] */
+    case 0x3: /* TMHL R1,I2 [RI] */
+        /* shift the addressed halfword of r1 down to bits 0..15, then
+           compare it against the (zero-extended) immediate mask */
+        tmp = load_reg(r1);
+        tmp2 = tcg_const_i64((uint16_t)i2);
+        switch (op) {
+        case 0x0:
+            tcg_gen_shri_i64(tmp, tmp, 16);
+            break;
+        case 0x1:
+            break;
+        case 0x2:
+            tcg_gen_shri_i64(tmp, tmp, 48);
+            break;
+        case 0x3:
+            tcg_gen_shri_i64(tmp, tmp, 32);
+            break;
+        }
+        tcg_gen_andi_i64(tmp, tmp, 0xffff);
+        cmp_64(s, tmp, tmp2, CC_OP_TM_64);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x4: /* brc m1, i2 */
+        /* relative offsets are in halfwords, hence the * 2 */
+        gen_brc(r1, s, i2 * 2LL);
+        return;
+    case 0x5: /* BRAS R1,I2 [RI] */
+        /* branch relative and save: link info points past this 4-byte insn */
+        tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 4));
+        store_reg(r1, tmp);
+        tcg_temp_free_i64(tmp);
+        gen_goto_tb(s, 0, s->pc + i2 * 2LL);
+        s->is_jmp = DISAS_TB_JUMP;
+        break;
+    case 0x6: /* BRCT R1,I2 [RI] */
+        /* decrement r1 (32 bit) and branch if the result is non-zero */
+        tmp32_1 = load_reg32(r1);
+        tcg_gen_subi_i32(tmp32_1, tmp32_1, 1);
+        store_reg32(r1, tmp32_1);
+        gen_update_cc_op(s);
+        l1 = gen_new_label();
+        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp32_1, 0, l1);
+        gen_goto_tb(s, 0, s->pc + (i2 * 2LL));
+        gen_set_label(l1);
+        gen_goto_tb(s, 1, s->pc + 4);
+        s->is_jmp = DISAS_TB_JUMP;
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x7: /* BRCTG R1,I2 [RI] */
+        /* 64-bit variant of BRCT */
+        tmp = load_reg(r1);
+        tcg_gen_subi_i64(tmp, tmp, 1);
+        store_reg(r1, tmp);
+        gen_update_cc_op(s);
+        l1 = gen_new_label();
+        tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);
+        gen_goto_tb(s, 0, s->pc + (i2 * 2LL));
+        gen_set_label(l1);
+        gen_goto_tb(s, 1, s->pc + 4);
+        s->is_jmp = DISAS_TB_JUMP;
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x8: /* lhi r1, i2 */
+        tmp32_1 = tcg_const_i32(i2);
+        store_reg32(r1, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x9: /* lghi r1, i2 */
+        tmp = tcg_const_i64(i2);
+        store_reg(r1, tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0xa: /* AHI R1,I2 [RI] */
+        tmp32_1 = load_reg32(r1);
+        tmp32_2 = tcg_temp_new_i32();
+        tmp32_3 = tcg_const_i32(i2);
+
+        if (i2 < 0) {
+            tcg_gen_subi_i32(tmp32_2, tmp32_1, -i2);
+        } else {
+            tcg_gen_add_i32(tmp32_2, tmp32_1, tmp32_3);
+        }
+
+        store_reg32(r1, tmp32_2);
+        set_cc_add32(s, tmp32_1, tmp32_3, tmp32_2);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0xb: /* aghi r1, i2 */
+        tmp = load_reg(r1);
+        tmp2 = tcg_const_i64(i2);
+
+        if (i2 < 0) {
+            tcg_gen_subi_i64(regs[r1], tmp, -i2);
+        } else {
+            tcg_gen_add_i64(regs[r1], tmp, tmp2);
+        }
+        set_cc_add64(s, tmp, tmp2, regs[r1]);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0xc: /* MHI R1,I2 [RI] */
+        tmp32_1 = load_reg32(r1);
+        tcg_gen_muli_i32(tmp32_1, tmp32_1, i2);
+        store_reg32(r1, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0xd: /* MGHI R1,I2 [RI] */
+        tmp = load_reg(r1);
+        tcg_gen_muli_i64(tmp, tmp, i2);
+        store_reg(r1, tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0xe: /* CHI R1,I2 [RI] */
+        tmp32_1 = load_reg32(r1);
+        cmp_s32c(s, tmp32_1, i2);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0xf: /* CGHI R1,I2 [RI] */
+        tmp = load_reg(r1);
+        cmp_s64c(s, tmp, i2);
+        tcg_temp_free_i64(tmp);
+        break;
+    default:
+        LOG_DISAS("illegal a7 operation 0x%x\n", op);
+        gen_illegal_opcode(s, 2);
+        return;
+    }
+}
+
+/* Disassemble one 0xb2-prefixed instruction.  Most are RRE format
+ * (r1/r2 decoded from the low insn byte here); the privileged S-format
+ * ops under !CONFIG_USER_ONLY re-decode base/displacement via
+ * decode_rs().  ilc is the instruction length code used for privilege
+ * checks and illegal-opcode reporting.
+ */
+static void disas_b2(DisasContext *s, int op, uint32_t insn)
+{
+    TCGv_i64 tmp, tmp2, tmp3;
+    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+    int r1, r2;
+    int ilc = 2;
+#ifndef CONFIG_USER_ONLY
+    int r3, d2, b2;
+#endif
+
+    r1 = (insn >> 4) & 0xf;
+    r2 = insn & 0xf;
+
+    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
+
+    switch (op) {
+    case 0x22: /* IPM R1 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        gen_op_calc_cc(s);
+        gen_helper_ipm(cc_op, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x41: /* CKSM R1,R2 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r2);
+        potential_page_fault(s);
+        gen_helper_cksm(tmp32_1, tmp32_2);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        gen_op_movi_cc(s, 0);
+        break;
+    case 0x4e: /* SAR R1,R2 [RRE] */
+        tmp32_1 = load_reg32(r2);
+        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, aregs[r1]));
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x4f: /* EAR R1,R2 [RRE] */
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, aregs[r2]));
+        store_reg32(r1, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x52: /* MSR R1,R2 [RRE] */
+        tmp32_1 = load_reg32(r1);
+        tmp32_2 = load_reg32(r2);
+        tcg_gen_mul_i32(tmp32_1, tmp32_1, tmp32_2);
+        store_reg32(r1, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x54: /* MVPG R1,R2 [RRE] */
+        tmp = load_reg(0);
+        tmp2 = load_reg(r1);
+        tmp3 = load_reg(r2);
+        potential_page_fault(s);
+        gen_helper_mvpg(tmp, tmp2, tmp3);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        /* XXX check CCO bit and set CC accordingly */
+        gen_op_movi_cc(s, 0);
+        break;
+    case 0x55: /* MVST R1,R2 [RRE] */
+        tmp32_1 = load_reg32(0);
+        tmp32_2 = tcg_const_i32(r1);
+        tmp32_3 = tcg_const_i32(r2);
+        potential_page_fault(s);
+        gen_helper_mvst(tmp32_1, tmp32_2, tmp32_3);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        gen_op_movi_cc(s, 1);
+        break;
+    case 0x5d: /* CLST R1,R2 [RRE] */
+        tmp32_1 = load_reg32(0);
+        tmp32_2 = tcg_const_i32(r1);
+        tmp32_3 = tcg_const_i32(r2);
+        potential_page_fault(s);
+        gen_helper_clst(cc_op, tmp32_1, tmp32_2, tmp32_3);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0x5e: /* SRST R1,R2 [RRE] */
+        tmp32_1 = load_reg32(0);
+        tmp32_2 = tcg_const_i32(r1);
+        tmp32_3 = tcg_const_i32(r2);
+        potential_page_fault(s);
+        gen_helper_srst(cc_op, tmp32_1, tmp32_2, tmp32_3);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+
+#ifndef CONFIG_USER_ONLY
+    case 0x02: /* STIDP D2(B2) [S] */
+        /* Store CPU ID */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_stidp(tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x04: /* SCK D2(B2) [S] */
+        /* Set Clock */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_sck(cc_op, tmp);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x05: /* STCK D2(B2) [S] */
+        /* Store Clock */
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_stck(cc_op, tmp);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x06: /* SCKC D2(B2) [S] */
+        /* Set Clock Comparator */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_sckc(tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x07: /* STCKC D2(B2) [S] */
+        /* Store Clock Comparator */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_stckc(tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x08: /* SPT D2(B2) [S] */
+        /* Set CPU Timer */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_spt(tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x09: /* STPT D2(B2) [S] */
+        /* Store CPU Timer */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_stpt(tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x0a: /* SPKA D2(B2) [S] */
+        /* Set PSW Key from Address */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
+        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
+        tcg_gen_or_i64(psw_mask, tmp2, tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x0d: /* PTLB [S] */
+        /* Purge TLB */
+        check_privileged(s, ilc);
+        gen_helper_ptlb();
+        break;
+    case 0x10: /* SPX D2(B2) [S] */
+        /* Set Prefix Register */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_spx(tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x11: /* STPX D2(B2) [S] */
+        /* Store Prefix */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        tmp2 = tcg_temp_new_i64();
+        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUState, psa));
+        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x12: /* STAP D2(B2) [S] */
+        /* Store CPU Address */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, cpu_num));
+        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x21: /* IPTE R1,R2 [RRE] */
+        /* Invalidate PTE */
+        check_privileged(s, ilc);
+        r1 = (insn >> 4) & 0xf;
+        r2 = insn & 0xf;
+        tmp = load_reg(r1);
+        tmp2 = load_reg(r2);
+        gen_helper_ipte(tmp, tmp2);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x29: /* ISKE R1,R2 [RRE] */
+        /* Insert Storage Key Extended */
+        check_privileged(s, ilc);
+        r1 = (insn >> 4) & 0xf;
+        r2 = insn & 0xf;
+        tmp = load_reg(r2);
+        tmp2 = tcg_temp_new_i64();
+        gen_helper_iske(tmp2, tmp);
+        store_reg(r1, tmp2);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        break;
+    case 0x2a: /* RRBE R1,R2 [RRE] */
+        /* Reset Reference Bit Extended */
+        check_privileged(s, ilc);
+        r1 = (insn >> 4) & 0xf;
+        r2 = insn & 0xf;
+        tmp32_1 = load_reg32(r1);
+        tmp = load_reg(r2);
+        gen_helper_rrbe(cc_op, tmp32_1, tmp);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x2b: /* SSKE R1,R2 [RRE] */
+        /* Set Storage Key Extended */
+        check_privileged(s, ilc);
+        r1 = (insn >> 4) & 0xf;
+        r2 = insn & 0xf;
+        tmp32_1 = load_reg32(r1);
+        tmp = load_reg(r2);
+        gen_helper_sske(tmp32_1, tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x34: /* STCH ? */
+        /* Store Subchannel */
+        check_privileged(s, ilc);
+        /* no channel subsystem emulated: report "not operational" */
+        gen_op_movi_cc(s, 3);
+        break;
+    case 0x46: /* STURA R1,R2 [RRE] */
+        /* Store Using Real Address */
+        check_privileged(s, ilc);
+        r1 = (insn >> 4) & 0xf;
+        r2 = insn & 0xf;
+        tmp32_1 = load_reg32(r1);
+        tmp = load_reg(r2);
+        potential_page_fault(s);
+        gen_helper_stura(tmp, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x50: /* CSP R1,R2 [RRE] */
+        /* Compare And Swap And Purge */
+        check_privileged(s, ilc);
+        r1 = (insn >> 4) & 0xf;
+        r2 = insn & 0xf;
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r2);
+        gen_helper_csp(cc_op, tmp32_1, tmp32_2);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x5f: /* CHSC ? */
+        /* Channel Subsystem Call */
+        check_privileged(s, ilc);
+        gen_op_movi_cc(s, 3);
+        break;
+    case 0x78: /* STCKE D2(B2) [S] */
+        /* Store Clock Extended */
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_stcke(cc_op, tmp);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x79: /* SACF D2(B2) [S] */
+        /* Set Address Space Control Fast */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        potential_page_fault(s);
+        gen_helper_sacf(tmp);
+        tcg_temp_free_i64(tmp);
+        /* addressing mode has changed, so end the block */
+        s->pc += ilc * 2;
+        update_psw_addr(s);
+        s->is_jmp = DISAS_EXCP;
+        break;
+    case 0x7d: /* STSI D2,(B2) [S] */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        tmp32_1 = load_reg32(0);
+        tmp32_2 = load_reg32(1);
+        potential_page_fault(s);
+        gen_helper_stsi(cc_op, tmp, tmp32_1, tmp32_2);
+        set_cc_static(s);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x9d: /* LFPC D2(B2) [S] */
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        tmp2 = tcg_temp_new_i64();
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0xb1: /* STFL D2(B2) [S] */
+        /* Store Facility List (CPU features) at 200 */
+        check_privileged(s, ilc);
+        tmp2 = tcg_const_i64(0xc0000000);
+        tmp = tcg_const_i64(200);
+        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0xb2: /* LPSWE D2(B2) [S] */
+        /* Load PSW Extended */
+        check_privileged(s, ilc);
+        decode_rs(s, insn, &r1, &r3, &b2, &d2);
+        tmp = get_address(s, 0, b2, d2);
+        tmp2 = tcg_temp_new_i64();
+        tmp3 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
+        tcg_gen_addi_i64(tmp, tmp, 8);
+        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
+        gen_helper_load_psw(tmp2, tmp3);
+        /* we need to keep cc_op intact */
+        s->is_jmp = DISAS_JUMP;
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+        break;
+    case 0x20: /* SERVC R1,R2 [RRE] */
+        /* SCLP Service call (PV hypercall) */
+        check_privileged(s, ilc);
+        potential_page_fault(s);
+        tmp32_1 = load_reg32(r2);
+        tmp = load_reg(r1);
+        gen_helper_servc(cc_op, tmp32_1, tmp);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i64(tmp);
+        break;
+#endif
+    default:
+        LOG_DISAS("illegal b2 operation 0x%x\n", op);
+        gen_illegal_opcode(s, ilc);
+        break;
+    }
+}
+
+/* Disassemble one 0xb3-prefixed instruction (binary floating point,
+ * RRE/RRF formats).  Most ops simply dispatch to a per-insn helper via
+ * the two macros below: FP_HELPER passes the register numbers, and
+ * FP_HELPER_CC additionally routes the helper's result into cc_op.
+ */
+static void disas_b3(DisasContext *s, int op, int m3, int r1, int r2)
+{
+    TCGv_i64 tmp;
+    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
+#define FP_HELPER(i) \
+    tmp32_1 = tcg_const_i32(r1); \
+    tmp32_2 = tcg_const_i32(r2); \
+    gen_helper_ ## i (tmp32_1, tmp32_2); \
+    tcg_temp_free_i32(tmp32_1); \
+    tcg_temp_free_i32(tmp32_2);
+
+#define FP_HELPER_CC(i) \
+    tmp32_1 = tcg_const_i32(r1); \
+    tmp32_2 = tcg_const_i32(r2); \
+    gen_helper_ ## i (cc_op, tmp32_1, tmp32_2); \
+    set_cc_static(s); \
+    tcg_temp_free_i32(tmp32_1); \
+    tcg_temp_free_i32(tmp32_2);
+
+    switch (op) {
+    case 0x0: /* LPEBR R1,R2 [RRE] */
+        FP_HELPER_CC(lpebr);
+        break;
+    case 0x2: /* LTEBR R1,R2 [RRE] */
+        FP_HELPER_CC(ltebr);
+        break;
+    case 0x3: /* LCEBR R1,R2 [RRE] */
+        FP_HELPER_CC(lcebr);
+        break;
+    case 0x4: /* LDEBR R1,R2 [RRE] */
+        FP_HELPER(ldebr);
+        break;
+    case 0x5: /* LXDBR R1,R2 [RRE] */
+        FP_HELPER(lxdbr);
+        break;
+    case 0x9: /* CEBR R1,R2 [RRE] */
+        FP_HELPER_CC(cebr);
+        break;
+    case 0xa: /* AEBR R1,R2 [RRE] */
+        FP_HELPER_CC(aebr);
+        break;
+    case 0xb: /* SEBR R1,R2 [RRE] */
+        FP_HELPER_CC(sebr);
+        break;
+    case 0xd: /* DEBR R1,R2 [RRE] */
+        FP_HELPER(debr);
+        break;
+    case 0x10: /* LPDBR R1,R2 [RRE] */
+        FP_HELPER_CC(lpdbr);
+        break;
+    case 0x12: /* LTDBR R1,R2 [RRE] */
+        FP_HELPER_CC(ltdbr);
+        break;
+    case 0x13: /* LCDBR R1,R2 [RRE] */
+        FP_HELPER_CC(lcdbr);
+        break;
+    case 0x15: /* SQDBR R1,R2 [RRE] */
+        FP_HELPER(sqdbr);
+        break;
+    case 0x17: /* MEEBR R1,R2 [RRE] */
+        FP_HELPER(meebr);
+        break;
+    case 0x19: /* CDBR R1,R2 [RRE] */
+        FP_HELPER_CC(cdbr);
+        break;
+    case 0x1a: /* ADBR R1,R2 [RRE] */
+        FP_HELPER_CC(adbr);
+        break;
+    case 0x1b: /* SDBR R1,R2 [RRE] */
+        FP_HELPER_CC(sdbr);
+        break;
+    case 0x1c: /* MDBR R1,R2 [RRE] */
+        FP_HELPER(mdbr);
+        break;
+    case 0x1d: /* DDBR R1,R2 [RRE] */
+        FP_HELPER(ddbr);
+        break;
+    case 0xe: /* MAEBR R1,R3,R2 [RRF] */
+    case 0x1e: /* MADBR R1,R3,R2 [RRF] */
+    case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
+        /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
+        tmp32_1 = tcg_const_i32(m3);
+        tmp32_2 = tcg_const_i32(r2);
+        tmp32_3 = tcg_const_i32(r1);
+        switch (op) {
+        case 0xe:
+            gen_helper_maebr(tmp32_1, tmp32_3, tmp32_2);
+            break;
+        case 0x1e:
+            gen_helper_madbr(tmp32_1, tmp32_3, tmp32_2);
+            break;
+        case 0x1f:
+            gen_helper_msdbr(tmp32_1, tmp32_3, tmp32_2);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0x40: /* LPXBR R1,R2 [RRE] */
+        FP_HELPER_CC(lpxbr);
+        break;
+    case 0x42: /* LTXBR R1,R2 [RRE] */
+        FP_HELPER_CC(ltxbr);
+        break;
+    case 0x43: /* LCXBR R1,R2 [RRE] */
+        FP_HELPER_CC(lcxbr);
+        break;
+    case 0x44: /* LEDBR R1,R2 [RRE] */
+        FP_HELPER(ledbr);
+        break;
+    case 0x45: /* LDXBR R1,R2 [RRE] */
+        FP_HELPER(ldxbr);
+        break;
+    case 0x46: /* LEXBR R1,R2 [RRE] */
+        FP_HELPER(lexbr);
+        break;
+    case 0x49: /* CXBR R1,R2 [RRE] */
+        FP_HELPER_CC(cxbr);
+        break;
+    case 0x4a: /* AXBR R1,R2 [RRE] */
+        FP_HELPER_CC(axbr);
+        break;
+    case 0x4b: /* SXBR R1,R2 [RRE] */
+        FP_HELPER_CC(sxbr);
+        break;
+    case 0x4c: /* MXBR R1,R2 [RRE] */
+        FP_HELPER(mxbr);
+        break;
+    case 0x4d: /* DXBR R1,R2 [RRE] */
+        FP_HELPER(dxbr);
+        break;
+    case 0x65: /* LXR R1,R2 [RRE] */
+        /* 128-bit FP registers are stored as even/odd freg pairs */
+        tmp = load_freg(r2);
+        store_freg(r1, tmp);
+        tcg_temp_free_i64(tmp);
+        tmp = load_freg(r2 + 2);
+        store_freg(r1 + 2, tmp);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0x74: /* LZER R1 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        gen_helper_lzer(tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x75: /* LZDR R1 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        gen_helper_lzdr(tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x76: /* LZXR R1 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        gen_helper_lzxr(tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x84: /* SFPC R1 [RRE] */
+        tmp32_1 = load_reg32(r1);
+        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x8c: /* EFPC R1 [RRE] */
+        tmp32_1 = tcg_temp_new_i32();
+        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+        store_reg32(r1, tmp32_1);
+        tcg_temp_free_i32(tmp32_1);
+        break;
+    case 0x94: /* CEFBR R1,R2 [RRE] */
+    case 0x95: /* CDFBR R1,R2 [RRE] */
+    case 0x96: /* CXFBR R1,R2 [RRE] */
+        /* convert from 32-bit fixed */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = load_reg32(r2);
+        switch (op) {
+        case 0x94:
+            gen_helper_cefbr(tmp32_1, tmp32_2);
+            break;
+        case 0x95:
+            gen_helper_cdfbr(tmp32_1, tmp32_2);
+            break;
+        case 0x96:
+            gen_helper_cxfbr(tmp32_1, tmp32_2);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+    case 0x98: /* CFEBR R1,R2 [RRE] */
+    case 0x99: /* CFDBR R1,R2 [RRE] */
+    case 0x9a: /* CFXBR R1,R2 [RRE] */
+        /* convert to 32-bit fixed; m3 selects the rounding mode */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r2);
+        tmp32_3 = tcg_const_i32(m3);
+        switch (op) {
+        case 0x98:
+            gen_helper_cfebr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+            break;
+        case 0x99:
+            gen_helper_cfdbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+            break;
+        case 0x9a:
+            gen_helper_cfxbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+            break;
+        default:
+            tcg_abort();
+        }
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0xa4: /* CEGBR R1,R2 [RRE] */
+    case 0xa5: /* CDGBR R1,R2 [RRE] */
+        /* convert from 64-bit fixed */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp = load_reg(r2);
+        switch (op) {
+        case 0xa4:
+            gen_helper_cegbr(tmp32_1, tmp);
+            break;
+        case 0xa5:
+            gen_helper_cdgbr(tmp32_1, tmp);
+            break;
+        default:
+            tcg_abort();
+        }
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0xa6: /* CXGBR R1,R2 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp = load_reg(r2);
+        gen_helper_cxgbr(tmp32_1, tmp);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i64(tmp);
+        break;
+    case 0xa8: /* CGEBR R1,R2 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r2);
+        tmp32_3 = tcg_const_i32(m3);
+        gen_helper_cgebr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0xa9: /* CGDBR R1,R2 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r2);
+        tmp32_3 = tcg_const_i32(m3);
+        gen_helper_cgdbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    case 0xaa: /* CGXBR R1,R2 [RRE] */
+        tmp32_1 = tcg_const_i32(r1);
+        tmp32_2 = tcg_const_i32(r2);
+        tmp32_3 = tcg_const_i32(m3);
+        gen_helper_cgxbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+        set_cc_static(s);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        tcg_temp_free_i32(tmp32_3);
+        break;
+    default:
+        LOG_DISAS("illegal b3 operation 0x%x\n", op);
+        gen_illegal_opcode(s, 2);
+        break;
+    }
+
+#undef FP_HELPER_CC
+#undef FP_HELPER
+}
+
+static void disas_b9(DisasContext *s, int op, int r1, int r2)
+{
+ TCGv_i64 tmp, tmp2, tmp3;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+
+ LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
+ switch (op) {
+ case 0x0: /* LPGR R1,R2 [RRE] */
+ case 0x1: /* LNGR R1,R2 [RRE] */
+ case 0x2: /* LTGR R1,R2 [RRE] */
+ case 0x3: /* LCGR R1,R2 [RRE] */
+ case 0x10: /* LPGFR R1,R2 [RRE] */
+ case 0x11: /* LNFGR R1,R2 [RRE] */
+ case 0x12: /* LTGFR R1,R2 [RRE] */
+ case 0x13: /* LCGFR R1,R2 [RRE] */
+ if (op & 0x10) {
+ tmp = load_reg32_i64(r2);
+ } else {
+ tmp = load_reg(r2);
+ }
+ switch (op & 0xf) {
+ case 0x0: /* LP?GR */
+ set_cc_abs64(s, tmp);
+ gen_helper_abs_i64(tmp, tmp);
+ store_reg(r1, tmp);
+ break;
+ case 0x1: /* LN?GR */
+ set_cc_nabs64(s, tmp);
+ gen_helper_nabs_i64(tmp, tmp);
+ store_reg(r1, tmp);
+ break;
+ case 0x2: /* LT?GR */
+ if (r1 != r2) {
+ store_reg(r1, tmp);
+ }
+ set_cc_s64(s, tmp);
+ break;
+ case 0x3: /* LC?GR */
+ tcg_gen_neg_i64(regs[r1], tmp);
+ set_cc_comp64(s, regs[r1]);
+ break;
+ }
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x4: /* LGR R1,R2 [RRE] */
+ store_reg(r1, regs[r2]);
+ break;
+ case 0x6: /* LGBR R1,R2 [RRE] */
+ tmp2 = load_reg(r2);
+ tcg_gen_ext8s_i64(tmp2, tmp2);
+ store_reg(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x8: /* AGR R1,R2 [RRE] */
+ case 0xa: /* ALGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_add_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ switch (op) {
+ case 0x8:
+ set_cc_add64(s, tmp, tmp2, tmp3);
+ break;
+ case 0xa:
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x9: /* SGR R1,R2 [RRE] */
+ case 0xb: /* SLGR R1,R2 [RRE] */
+ case 0x1b: /* SLGFR R1,R2 [RRE] */
+ case 0x19: /* SGFR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ switch (op) {
+ case 0x1b:
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x19:
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ default:
+ tmp2 = load_reg(r2);
+ break;
+ }
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_sub_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ switch (op) {
+ case 0x9:
+ case 0x19:
+ set_cc_sub64(s, tmp, tmp2, tmp3);
+ break;
+ case 0xb:
+ case 0x1b:
+ set_cc_subu64(s, tmp, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0xc: /* MSGR R1,R2 [RRE] */
+ case 0x1c: /* MSGFR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ if (op == 0x1c) {
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ }
+ tcg_gen_mul_i64(tmp, tmp, tmp2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0xd: /* DSGR R1,R2 [RRE] */
+ case 0x1d: /* DSGFR R1,R2 [RRE] */
+ tmp = load_reg(r1 + 1);
+ if (op == 0xd) {
+ tmp2 = load_reg(r2);
+ } else {
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ }
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_div_i64(tmp3, tmp, tmp2);
+ store_reg(r1 + 1, tmp3);
+ tcg_gen_rem_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x14: /* LGFR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp, tmp32_1);
+ store_reg(r1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x16: /* LLGFR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ store_reg(r1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x17: /* LLGTR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ store_reg(r1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x18: /* AGFR R1,R2 [RRE] */
+ case 0x1a: /* ALGFR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ if (op == 0x18) {
+ tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+ } else {
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tmp = load_reg(r1);
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_add_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ if (op == 0x18) {
+ set_cc_add64(s, tmp, tmp2, tmp3);
+ } else {
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x1f: /* LRVR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x20: /* CGR R1,R2 [RRE] */
+ case 0x30: /* CGFR R1,R2 [RRE] */
+ tmp2 = load_reg(r2);
+ if (op == 0x30) {
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ }
+ tmp = load_reg(r1);
+ cmp_s64(s, tmp, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x21: /* CLGR R1,R2 [RRE] */
+ case 0x31: /* CLGFR R1,R2 [RRE] */
+ tmp2 = load_reg(r2);
+ if (op == 0x31) {
+ tcg_gen_ext32u_i64(tmp2, tmp2);
+ }
+ tmp = load_reg(r1);
+ cmp_u64(s, tmp, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x26: /* LBR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_ext8s_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x27: /* LHR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_ext16s_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x80: /* NGR R1,R2 [RRE] */
+ case 0x81: /* OGR R1,R2 [RRE] */
+ case 0x82: /* XGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ switch (op) {
+ case 0x80:
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ break;
+ case 0x81:
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ break;
+ case 0x82:
+ tcg_gen_xor_i64(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp);
+ set_cc_nz_u64(s, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x83: /* FLOGR R1,R2 [RRE] */
+ tmp = load_reg(r2);
+ tmp32_1 = tcg_const_i32(r1);
+ gen_helper_flogr(cc_op, tmp32_1, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x84: /* LLGCR R1,R2 [RRE] */
+ tmp = load_reg(r2);
+ tcg_gen_andi_i64(tmp, tmp, 0xff);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x85: /* LLGHR R1,R2 [RRE] */
+ tmp = load_reg(r2);
+ tcg_gen_andi_i64(tmp, tmp, 0xffff);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x87: /* DLGR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp = load_reg(r2);
+ gen_helper_dlg(tmp32_1, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x88: /* ALCGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp3 = tcg_temp_new_i64();
+ gen_op_calc_cc(s);
+ tcg_gen_extu_i32_i64(tmp3, cc_op);
+ tcg_gen_shri_i64(tmp3, tmp3, 1);
+ tcg_gen_andi_i64(tmp3, tmp3, 1);
+ tcg_gen_add_i64(tmp3, tmp2, tmp3);
+ tcg_gen_add_i64(tmp3, tmp, tmp3);
+ store_reg(r1, tmp3);
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x89: /* SLBGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp32_1 = tcg_const_i32(r1);
+ gen_op_calc_cc(s);
+ gen_helper_slbg(cc_op, cc_op, tmp32_1, tmp, tmp2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x94: /* LLCR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, 0xff);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x95: /* LLHR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, 0xffff);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x96: /* MLR R1,R2 [RRE] */
+ /* reg(r1, r1+1) = reg(r1+1) * reg(r2) */
+ tmp2 = load_reg(r2);
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32u_i64(tmp2, tmp2);
+ tcg_gen_ext32u_i64(tmp3, tmp3);
+ tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+ store_reg32_i64((r1 + 1) & 15, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x97: /* DLR R1,R2 [RRE] */
+ /* reg(r1) = reg(r1, r1+1) % reg(r2) */
+ /* reg(r1+1) = reg(r1, r1+1) / reg(r2) */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32u_i64(tmp2, tmp2);
+ tcg_gen_ext32u_i64(tmp3, tmp3);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp3);
+
+ tcg_gen_rem_i64(tmp3, tmp, tmp2);
+ tcg_gen_div_i64(tmp, tmp, tmp2);
+ store_reg32_i64((r1 + 1) & 15, tmp);
+ store_reg32_i64(r1, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x98: /* ALCR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ tmp32_3 = tcg_temp_new_i32();
+ /* XXX possible optimization point */
+ gen_op_calc_cc(s);
+ gen_helper_addc_u32(tmp32_3, cc_op, tmp32_1, tmp32_2);
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ store_reg32(r1, tmp32_3);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x99: /* SLBR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp32_2 = tcg_const_i32(r1);
+ gen_op_calc_cc(s);
+ gen_helper_slb(cc_op, cc_op, tmp32_2, tmp32_1);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ default:
+ LOG_DISAS("illegal b9 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 2);
+ break;
+ }
+}
+
+static void disas_c0(DisasContext *s, int op, int r1, int i2)
+{
+ TCGv_i64 tmp;
+ TCGv_i32 tmp32_1, tmp32_2;
+ uint64_t target = s->pc + i2 * 2LL;
+ int l1;
+
+ LOG_DISAS("disas_c0: op 0x%x r1 %d i2 %d\n", op, r1, i2);
+
+ switch (op) {
+ case 0: /* larl r1, i2 */
+ tmp = tcg_const_i64(target);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x1: /* LGFI R1,I2 [RIL] */
+ tmp = tcg_const_i64((int64_t)i2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x4: /* BRCL M1,I2 [RIL] */
+ /* m1 & (1 << (3 - cc)) */
+ tmp32_1 = tcg_const_i32(3);
+ tmp32_2 = tcg_const_i32(1);
+ gen_op_calc_cc(s);
+ tcg_gen_sub_i32(tmp32_1, tmp32_1, cc_op);
+ tcg_gen_shl_i32(tmp32_2, tmp32_2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tmp32_1 = tcg_const_i32(r1); /* m1 == r1 */
+ tcg_gen_and_i32(tmp32_1, tmp32_1, tmp32_2);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp32_1, 0, l1);
+ gen_goto_tb(s, 0, target);
+ gen_set_label(l1);
+ gen_goto_tb(s, 1, s->pc + 6);
+ s->is_jmp = DISAS_TB_JUMP;
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x5: /* brasl r1, i2 */
+ tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 6));
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ gen_goto_tb(s, 0, target);
+ s->is_jmp = DISAS_TB_JUMP;
+ break;
+ case 0x7: /* XILF R1,I2 [RIL] */
+ case 0xb: /* NILF R1,I2 [RIL] */
+ case 0xd: /* OILF R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ switch (op) {
+ case 0x7:
+ tcg_gen_xori_i32(tmp32_1, tmp32_1, (uint32_t)i2);
+ break;
+ case 0xb:
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, (uint32_t)i2);
+ break;
+ case 0xd:
+ tcg_gen_ori_i32(tmp32_1, tmp32_1, (uint32_t)i2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_1);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x9: /* IILF R1,I2 [RIL] */
+ tmp32_1 = tcg_const_i32((uint32_t)i2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xa: /* NIHF R1,I2 [RIL] */
+ tmp = load_reg(r1);
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_andi_i64(tmp, tmp, (((uint64_t)((uint32_t)i2)) << 32)
+ | 0xffffffffULL);
+ store_reg(r1, tmp);
+ tcg_gen_shri_i64(tmp, tmp, 32);
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xe: /* LLIHF R1,I2 [RIL] */
+ tmp = tcg_const_i64(((uint64_t)(uint32_t)i2) << 32);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xf: /* LLILF R1,I2 [RIL] */
+ tmp = tcg_const_i64((uint32_t)i2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ default:
+ LOG_DISAS("illegal c0 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 3);
+ break;
+ }
+}
+
+static void disas_c2(DisasContext *s, int op, int r1, int i2)
+{
+ TCGv_i64 tmp, tmp2, tmp3;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+
+ switch (op) {
+ case 0x4: /* SLGFI R1,I2 [RIL] */
+ case 0xa: /* ALGFI R1,I2 [RIL] */
+ tmp = load_reg(r1);
+ tmp2 = tcg_const_i64((uint64_t)(uint32_t)i2);
+ tmp3 = tcg_temp_new_i64();
+ switch (op) {
+ case 0x4:
+ tcg_gen_sub_i64(tmp3, tmp, tmp2);
+ set_cc_subu64(s, tmp, tmp2, tmp3);
+ break;
+ case 0xa:
+ tcg_gen_add_i64(tmp3, tmp, tmp2);
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x5: /* SLFI R1,I2 [RIL] */
+ case 0xb: /* ALFI R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_const_i32(i2);
+ tmp32_3 = tcg_temp_new_i32();
+ switch (op) {
+ case 0x5:
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0xb:
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_3);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xc: /* CGFI R1,I2 [RIL] */
+ tmp = load_reg(r1);
+ cmp_s64c(s, tmp, (int64_t)i2);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xe: /* CLGFI R1,I2 [RIL] */
+ tmp = load_reg(r1);
+ cmp_u64c(s, tmp, (uint64_t)(uint32_t)i2);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xd: /* CFI R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ cmp_s32c(s, tmp32_1, i2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xf: /* CLFI R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ cmp_u32c(s, tmp32_1, i2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ default:
+ LOG_DISAS("illegal c2 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 3);
+ break;
+ }
+}
+
+static void gen_and_or_xor_i32(int opc, TCGv_i32 tmp, TCGv_i32 tmp2)
+{
+ switch (opc & 0xf) {
+ case 0x4:
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ break;
+ case 0x6:
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ break;
+ case 0x7:
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+static void disas_s390_insn(DisasContext *s)
+{
+ TCGv_i64 tmp, tmp2, tmp3;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+ unsigned char opc;
+ uint64_t insn;
+ int op, r1, r2, r3, d1, d2, x2, b1, b2, i, i2, r1b;
+ TCGv_i32 vl;
+ int ilc;
+ int l1;
+
+ opc = ldub_code(s->pc);
+ LOG_DISAS("opc 0x%x\n", opc);
+
+ ilc = get_ilc(opc);
+
+ switch (opc) {
+#ifndef CONFIG_USER_ONLY
+ case 0x01: /* SAM */
+ insn = ld_code2(s->pc);
+ /* set addressing mode, but we only do 64bit anyway */
+ break;
+#endif
+ case 0x6: /* BCTR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tcg_gen_subi_i32(tmp32_1, tmp32_1, 1);
+ store_reg32(r1, tmp32_1);
+
+ if (r2) {
+ gen_update_cc_op(s);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp32_1, 0, l1);
+
+ /* not taking the branch, jump to after the instruction */
+ gen_goto_tb(s, 0, s->pc + 2);
+ gen_set_label(l1);
+
+ /* take the branch, move R2 into psw.addr */
+ tmp32_1 = load_reg32(r2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ tcg_gen_mov_i64(psw_addr, tmp);
+ s->is_jmp = DISAS_JUMP;
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ }
+ break;
+ case 0x7: /* BCR M1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ if (r2) {
+ tmp = load_reg(r2);
+ gen_bcr(s, r1, tmp, s->pc);
+ tcg_temp_free_i64(tmp);
+ s->is_jmp = DISAS_TB_JUMP;
+ } else {
+ /* XXX: "serialization and checkpoint-synchronization function"? */
+ }
+ break;
+ case 0xa: /* SVC I [RR] */
+ insn = ld_code2(s->pc);
+ debug_insn(insn);
+ i = insn & 0xff;
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+ tmp32_1 = tcg_const_i32(i);
+ tmp32_2 = tcg_const_i32(ilc * 2);
+ tmp32_3 = tcg_const_i32(EXCP_SVC);
+ tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, int_svc_code));
+ tcg_gen_st_i32(tmp32_2, cpu_env, offsetof(CPUState, int_svc_ilc));
+ gen_helper_exception(tmp32_3);
+ s->is_jmp = DISAS_EXCP;
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xd: /* BASR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 2));
+ store_reg(r1, tmp);
+ if (r2) {
+ tmp2 = load_reg(r2);
+ tcg_gen_mov_i64(psw_addr, tmp2);
+ tcg_temp_free_i64(tmp2);
+ s->is_jmp = DISAS_JUMP;
+ }
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xe: /* MVCL R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ potential_page_fault(s);
+ gen_helper_mvcl(cc_op, tmp32_1, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x10: /* LPR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ set_cc_abs32(s, tmp32_1);
+ gen_helper_abs_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x11: /* LNR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ set_cc_nabs32(s, tmp32_1);
+ gen_helper_nabs_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x12: /* LTR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ if (r1 != r2) {
+ store_reg32(r1, tmp32_1);
+ }
+ set_cc_s32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x13: /* LCR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_neg_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ set_cc_comp32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x14: /* NR R1,R2 [RR] */
+ case 0x16: /* OR R1,R2 [RR] */
+ case 0x17: /* XR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_2 = load_reg32(r2);
+ tmp32_1 = load_reg32(r1);
+ gen_and_or_xor_i32(opc, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_1);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x18: /* LR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x15: /* CLR R1,R2 [RR] */
+ case 0x19: /* CR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ if (opc == 0x15) {
+ cmp_u32(s, tmp32_1, tmp32_2);
+ } else {
+ cmp_s32(s, tmp32_1, tmp32_2);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x1a: /* AR R1,R2 [RR] */
+ case 0x1e: /* ALR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ tmp32_3 = tcg_temp_new_i32();
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_3);
+ if (opc == 0x1a) {
+ set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+ } else {
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x1b: /* SR R1,R2 [RR] */
+ case 0x1f: /* SLR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ tmp32_3 = tcg_temp_new_i32();
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_3);
+ if (opc == 0x1b) {
+ set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+ } else {
+ set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x1c: /* MR R1,R2 [RR] */
+ /* reg(r1, r1+1) = reg(r1+1) * reg(r2) */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp2 = load_reg(r2);
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ tcg_gen_ext32s_i64(tmp3, tmp3);
+ tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+ store_reg32_i64((r1 + 1) & 15, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x1d: /* DR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r1 + 1);
+ tmp32_3 = load_reg32(r2);
+
+ tmp = tcg_temp_new_i64(); /* dividend */
+ tmp2 = tcg_temp_new_i64(); /* divisor */
+ tmp3 = tcg_temp_new_i64();
+
+ /* dividend is (r(r1) << 32) | r(r1 + 1) */
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ tcg_gen_extu_i32_i64(tmp2, tmp32_2);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+
+ /* divisor is r(r2) */
+ tcg_gen_ext_i32_i64(tmp2, tmp32_3);
+
+ tcg_gen_div_i64(tmp3, tmp, tmp2);
+ tcg_gen_rem_i64(tmp, tmp, tmp2);
+
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp3);
+
+ store_reg32(r1, tmp32_1); /* remainder */
+ store_reg32(r1 + 1, tmp32_2); /* quotient */
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x28: /* LDR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp = load_freg(r2);
+ store_freg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x38: /* LER R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_freg32(r2);
+ store_freg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x40: /* STH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tcg_gen_qemu_st16(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x41: /* la */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ store_reg(r1, tmp); /* FIXME: 31/24-bit addressing */
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x42: /* STC R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x43: /* IC R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ store_reg8(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x44: /* EX R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tmp3 = tcg_const_i64(s->pc + 4);
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+ gen_helper_ex(cc_op, cc_op, tmp2, tmp, tmp3);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x46: /* BCT R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tcg_temp_free_i64(tmp);
+
+ tmp32_1 = load_reg32(r1);
+ tcg_gen_subi_i32(tmp32_1, tmp32_1, 1);
+ store_reg32(r1, tmp32_1);
+
+ gen_update_cc_op(s);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp32_1, 0, l1);
+
+ /* not taking the branch, jump to after the instruction */
+ gen_goto_tb(s, 0, s->pc + 4);
+ gen_set_label(l1);
+
+ /* take the branch, move the computed operand address into psw.addr */
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tcg_gen_mov_i64(psw_addr, tmp);
+ s->is_jmp = DISAS_JUMP;
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x47: /* BC M1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ gen_bcr(s, r1, tmp, s->pc + 4);
+ tcg_temp_free_i64(tmp);
+ s->is_jmp = DISAS_TB_JUMP;
+ break;
+ case 0x48: /* LH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s));
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x49: /* CH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ cmp_s32(s, tmp32_1, tmp32_2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x4a: /* AH R1,D2(X2,B2) [RX] */
+ case 0x4b: /* SH R1,D2(X2,B2) [RX] */
+ case 0x4c: /* MH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ switch (opc) {
+ case 0x4a:
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x4b:
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x4c:
+ tcg_gen_mul_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_3);
+
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x4d: /* BAS R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_const_i64(pc_to_link_info(s, s->pc + 4));
+ store_reg(r1, tmp2);
+ tcg_gen_mov_i64(psw_addr, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ s->is_jmp = DISAS_JUMP;
+ break;
+ case 0x4e: /* CVD R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp32_1, regs[r1]);
+ gen_helper_cvd(tmp2, tmp32_1);
+ tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x50: /* st r1, d2(x2, b2) */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x55: /* CL R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tmp32_2 = load_reg32(r1);
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ cmp_u32(s, tmp32_2, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x54: /* N R1,D2(X2,B2) [RX] */
+ case 0x56: /* O R1,D2(X2,B2) [RX] */
+ case 0x57: /* X R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ gen_and_or_xor_i32(opc, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_1);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x58: /* l r1, d2(x2, b2) */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x59: /* C R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tmp32_2 = load_reg32(r1);
+ tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ cmp_s32(s, tmp32_2, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x5a: /* A R1,D2(X2,B2) [RX] */
+ case 0x5b: /* S R1,D2(X2,B2) [RX] */
+ case 0x5e: /* AL R1,D2(X2,B2) [RX] */
+ case 0x5f: /* SL R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32s(tmp, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp);
+ switch (opc) {
+ case 0x5a:
+ case 0x5e:
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ case 0x5b:
+ case 0x5f:
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_3);
+ switch (opc) {
+ case 0x5a:
+ set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x5e:
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x5b:
+ set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x5f:
+ set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x5c: /* M R1,D2(X2,B2) [RX] */
+ /* reg(r1, r1+1) = reg(r1+1) * *(s32*)addr */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s));
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ tcg_gen_ext32s_i64(tmp3, tmp3);
+ tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+ store_reg32_i64((r1 + 1) & 15, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x5d: /* D R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp3 = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r1 + 1);
+
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+
+ /* dividend is (r(r1) << 32) | r(r1 + 1) */
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ tcg_gen_extu_i32_i64(tmp2, tmp32_2);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+
+ /* divisor is in memory */
+ tcg_gen_qemu_ld32s(tmp2, tmp3, get_mem_index(s));
+
+ /* XXX divisor == 0 -> FixP divide exception */
+
+ tcg_gen_div_i64(tmp3, tmp, tmp2);
+ tcg_gen_rem_i64(tmp, tmp, tmp2);
+
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp3);
+
+ store_reg32(r1, tmp32_1); /* remainder */
+ store_reg32(r1 + 1, tmp32_2); /* quotient */
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x60: /* STD R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_freg(r1);
+ tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x68: /* LD R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
+ store_freg(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x70: /* STE R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_freg32(r1);
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x71: /* MS R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ tcg_gen_mul_i32(tmp32_1, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x78: /* LE R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ store_freg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0x80: /* SSM D2(B2) [S] */
+ /* Set System Mask */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tmp3, psw_mask, ~0xff00000000000000ULL);
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_shli_i64(tmp2, tmp2, 56);
+ tcg_gen_or_i64(psw_mask, tmp3, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x82: /* LPSW D2(B2) [S] */
+ /* Load PSW */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_addi_i64(tmp, tmp, 4);
+ tcg_gen_qemu_ld32u(tmp3, tmp, get_mem_index(s));
+ gen_helper_load_psw(tmp2, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ /* we need to keep cc_op intact */
+ s->is_jmp = DISAS_JUMP;
+ break;
+ case 0x83: /* DIAG R1,R3,D2 [RS] */
+ /* Diagnose call (KVM hypercall) */
+ check_privileged(s, ilc);
+ potential_page_fault(s);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp32_1 = tcg_const_i32(insn & 0xfff);
+ tmp2 = load_reg(2);
+ tmp3 = load_reg(1);
+ gen_helper_diag(tmp2, tmp32_1, tmp2, tmp3);
+ store_reg(2, tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+#endif
+ case 0x88: /* SRL R1,D2(B2) [RS] */
+ case 0x89: /* SLL R1,D2(B2) [RS] */
+ case 0x8a: /* SRA R1,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp);
+ tcg_gen_andi_i32(tmp32_2, tmp32_2, 0x3f);
+ switch (opc) {
+ case 0x88:
+ tcg_gen_shr_i32(tmp32_1, tmp32_1, tmp32_2);
+ break;
+ case 0x89:
+ tcg_gen_shl_i32(tmp32_1, tmp32_1, tmp32_2);
+ break;
+ case 0x8a:
+ tcg_gen_sar_i32(tmp32_1, tmp32_1, tmp32_2);
+ set_cc_s32(s, tmp32_1);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x8c: /* SRDL R1,D2(B2) [RS] */
+ case 0x8d: /* SLDL R1,D2(B2) [RS] */
+ case 0x8e: /* SRDA R1,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2); /* shift */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r1 + 1);
+ tcg_gen_concat_i32_i64(tmp2, tmp32_2, tmp32_1); /* operand */
+ switch (opc) {
+ case 0x8c:
+ tcg_gen_shr_i64(tmp2, tmp2, tmp);
+ break;
+ case 0x8d:
+ tcg_gen_shl_i64(tmp2, tmp2, tmp);
+ break;
+ case 0x8e:
+ tcg_gen_sar_i64(tmp2, tmp2, tmp);
+ set_cc_s64(s, tmp2);
+ break;
+ }
+ tcg_gen_shri_i64(tmp, tmp2, 32);
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ store_reg32(r1, tmp32_1);
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ store_reg32(r1 + 1, tmp32_2);
+ break;
+ case 0x98: /* LM R1,R3,D2(B2) [RS] */
+ case 0x90: /* STM R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+
+ tmp = get_address(s, 0, b2, d2);
+ for (i = r1;; i = (i + 1) % 16) {
+ if (opc == 0x98) {
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ store_reg32_i64(i, tmp2);
+ tcg_temp_free_i64(tmp2);
+ } else {
+ tmp2 = load_reg(i);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp2);
+ }
+ tcg_gen_addi_i64(tmp, tmp, 4);
+ if (i == r3) {
+ break;
+ }
+ }
+ break;
+ case 0x91: /* TM D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_const_i64(i2);
+ tcg_gen_qemu_ld8u(tmp, tmp, get_mem_index(s));
+ cmp_64(s, tmp, tmp2, CC_OP_TM_32);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x92: /* MVI D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_const_i64(i2);
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x94: /* NI D1(B1),I2 [SI] */
+ case 0x96: /* OI D1(B1),I2 [SI] */
+ case 0x97: /* XI D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ switch (opc) {
+ case 0x94:
+ tcg_gen_andi_i64(tmp2, tmp2, i2);
+ break;
+ case 0x96:
+ tcg_gen_ori_i64(tmp2, tmp2, i2);
+ break;
+ case 0x97:
+ tcg_gen_xori_i64(tmp2, tmp2, i2);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ set_cc_nz_u64(s, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x95: /* CLI D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ cmp_u64c(s, tmp2, i2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_lam(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stam(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xa5:
+ insn = ld_code4(s->pc);
+ r1 = (insn >> 20) & 0xf;
+ op = (insn >> 16) & 0xf;
+ i2 = insn & 0xffff;
+ disas_a5(s, op, r1, i2);
+ break;
+ case 0xa7:
+ insn = ld_code4(s->pc);
+ r1 = (insn >> 20) & 0xf;
+ op = (insn >> 16) & 0xf;
+ i2 = (short)insn;
+ disas_a7(s, op, r1, i2);
+ break;
+ case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_mvcle(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_clcle(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xac: /* STNSM D1(B1),I2 [SI] */
+ case 0xad: /* STOSM D1(B1),I2 [SI] */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(tmp2, psw_mask, 56);
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ if (opc == 0xac) {
+ tcg_gen_andi_i64(psw_mask, psw_mask,
+ ((uint64_t)i2 << 56) | 0x00ffffffffffffffULL);
+ } else {
+ tcg_gen_ori_i64(psw_mask, psw_mask, (uint64_t)i2 << 56);
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = load_reg(r3);
+ tmp32_1 = tcg_const_i32(r1);
+ potential_page_fault(s);
+ gen_helper_sigp(cc_op, tmp, tmp32_1, tmp2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = tcg_const_i32(r1);
+ potential_page_fault(s);
+ gen_helper_lra(cc_op, tmp, tmp32_1);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+#endif
+ case 0xb2:
+ insn = ld_code4(s->pc);
+ op = (insn >> 16) & 0xff;
+ switch (op) {
+ case 0x9c: /* STFPC D2(B2) [S] */
+ d2 = insn & 0xfff;
+ b2 = (insn >> 12) & 0xf;
+ tmp32_1 = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i64();
+ tmp2 = get_address(s, 0, b2, d2);
+ tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ tcg_gen_qemu_st32(tmp, tmp2, get_mem_index(s));
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ default:
+ disas_b2(s, op, insn);
+ break;
+ }
+ break;
+ case 0xb3:
+ insn = ld_code4(s->pc);
+ op = (insn >> 16) & 0xff;
+ r3 = (insn >> 12) & 0xf; /* aka m3 */
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ disas_b3(s, op, r3, r1, r2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
+ /* Store Control */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stctl(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
+ /* Load Control */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_lctl(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+#endif
+ case 0xb9:
+ insn = ld_code4(s->pc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ op = (insn >> 16) & 0xff;
+ disas_b9(s, op, r1, r2);
+ break;
+ case 0xba: /* CS R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_cs(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_clm(cc_op, tmp32_1, tmp32_2, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stcm(tmp32_1, tmp32_2, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xbf: /* ICM R1,M3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ if (r3 == 15) {
+ /* effectively a 32-bit load */
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_temp_new_i32();
+ tmp32_2 = tcg_const_i32(r3);
+ tcg_gen_qemu_ld32u(tmp, tmp, get_mem_index(s));
+ store_reg32_i64(r1, tmp);
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ set_cc_icm(s, tmp32_2, tmp32_1, r3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ } else if (r3) {
+ uint32_t mask = 0x00ffffffUL;
+ uint32_t shift = 24;
+ int m3 = r3;
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_const_i32(r3);
+ while (m3) {
+ if (m3 & 8) {
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ if (shift) {
+ tcg_gen_shli_i32(tmp32_2, tmp32_2, shift);
+ }
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, mask);
+ tcg_gen_or_i32(tmp32_1, tmp32_1, tmp32_2);
+ tcg_gen_addi_i64(tmp, tmp, 1);
+ }
+ m3 = (m3 << 1) & 0xf;
+ mask = (mask >> 8) | 0xff000000UL;
+ shift -= 8;
+ }
+ store_reg32(r1, tmp32_1);
+ set_cc_icm(s, tmp32_3, tmp32_2, r3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ } else {
+ /* i.e. env->cc = 0 */
+ gen_op_movi_cc(s, 0);
+ }
+ break;
+ case 0xc0:
+ case 0xc2:
+ insn = ld_code6(s->pc);
+ r1 = (insn >> 36) & 0xf;
+ op = (insn >> 32) & 0xf;
+ i2 = (int)insn;
+ switch (opc) {
+ case 0xc0:
+ disas_c0(s, op, r1, i2);
+ break;
+ case 0xc2:
+ disas_c2(s, op, r1, i2);
+ break;
+ default:
+ tcg_abort();
+ }
+ break;
+ case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
+ case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
+ case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
+ case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
+ case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
+ case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
+ case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
+ insn = ld_code6(s->pc);
+ vl = tcg_const_i32((insn >> 32) & 0xff);
+ b1 = (insn >> 28) & 0xf;
+ b2 = (insn >> 12) & 0xf;
+ d1 = (insn >> 16) & 0xfff;
+ d2 = insn & 0xfff;
+ tmp = get_address(s, 0, b1, d1);
+ tmp2 = get_address(s, 0, b2, d2);
+ switch (opc) {
+ case 0xd2:
+ gen_op_mvc(s, (insn >> 32) & 0xff, tmp, tmp2);
+ break;
+ case 0xd4:
+ potential_page_fault(s);
+ gen_helper_nc(cc_op, vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xd5:
+ gen_op_clc(s, (insn >> 32) & 0xff, tmp, tmp2);
+ break;
+ case 0xd6:
+ potential_page_fault(s);
+ gen_helper_oc(cc_op, vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xd7:
+ potential_page_fault(s);
+ gen_helper_xc(cc_op, vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xdc:
+ potential_page_fault(s);
+ gen_helper_tr(vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xf3:
+ potential_page_fault(s);
+ gen_helper_unpk(vl, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
+ case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
+ check_privileged(s, ilc);
+ potential_page_fault(s);
+ insn = ld_code6(s->pc);
+ r1 = (insn >> 36) & 0xf;
+ r3 = (insn >> 32) & 0xf;
+ b1 = (insn >> 28) & 0xf;
+ d1 = (insn >> 16) & 0xfff;
+ b2 = (insn >> 12) & 0xf;
+ d2 = insn & 0xfff;
+ tmp = load_reg(r1);
+ /* XXX key in r3 */
+ tmp2 = get_address(s, 0, b1, d1);
+ tmp3 = get_address(s, 0, b2, d2);
+ if (opc == 0xda) {
+ gen_helper_mvcp(cc_op, tmp, tmp2, tmp3);
+ } else {
+ gen_helper_mvcs(cc_op, tmp, tmp2, tmp3);
+ }
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+#endif
+ case 0xe3:
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ op = insn & 0xff;
+ r1 = (insn >> 36) & 0xf;
+ x2 = (insn >> 32) & 0xf;
+ b2 = (insn >> 28) & 0xf;
+ d2 = ((int)((((insn >> 16) & 0xfff)
+ | ((insn << 4) & 0xff000)) << 12)) >> 12;
+ disas_e3(s, op, r1, x2, b2, d2 );
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xe5:
+ /* Test Protection */
+ check_privileged(s, ilc);
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ disas_e5(s, insn);
+ break;
+#endif
+ case 0xeb:
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ op = insn & 0xff;
+ r1 = (insn >> 36) & 0xf;
+ r3 = (insn >> 32) & 0xf;
+ b2 = (insn >> 28) & 0xf;
+ d2 = ((int)((((insn >> 16) & 0xfff)
+ | ((insn << 4) & 0xff000)) << 12)) >> 12;
+ disas_eb(s, op, r1, r3, b2, d2);
+ break;
+ case 0xed:
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ op = insn & 0xff;
+ r1 = (insn >> 36) & 0xf;
+ x2 = (insn >> 32) & 0xf;
+ b2 = (insn >> 28) & 0xf;
+ d2 = (short)((insn >> 16) & 0xfff);
+ r1b = (insn >> 12) & 0xf;
+ disas_ed(s, op, r1, x2, b2, d2, r1b);
+ break;
+ default:
+ LOG_DISAS("unimplemented opcode 0x%x\n", opc);
+ gen_illegal_opcode(s, ilc);
+ break;
+ }
+
+ /* Instruction length is encoded in the opcode */
+ s->pc += (ilc * 2);
+}
+
+/*
+ * Translate a block of guest instructions starting at tb->pc into TCG ops.
+ *
+ * When search_pc is non-zero, per-instruction metadata (guest PC, cc_op,
+ * icount) is additionally recorded in the gen_opc_* arrays so that a host
+ * PC inside the generated code can later be mapped back to guest state
+ * (consumed by gen_pc_load).
+ */
+static inline void gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
+{
+ DisasContext dc;
+ target_ulong pc_start;
+ uint64_t next_page_start;
+ uint16_t *gen_opc_end;
+ int j, lj = -1;
+ int num_insns, max_insns;
+ CPUBreakpoint *bp;
+
+ pc_start = tb->pc;
+
+ /* 31-bit mode */
+ if (!(tb->flags & FLAG_MASK_64)) {
+ pc_start &= 0x7fffffff;
+ }
+
+ dc.pc = pc_start;
+ dc.is_jmp = DISAS_NEXT;
+ dc.tb = tb;
+ /* Condition-code state is not statically known at TB entry.  */
+ dc.cc_op = CC_OP_DYNAMIC;
+
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+
+ /* Translation must not cross a guest page boundary (see loop below).  */
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+
+ gen_icount_start();
+
+ /* Main per-instruction translation loop.  */
+ do {
+ /* Emit a debug exception if a breakpoint sits on this insn.  */
+ if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == dc.pc) {
+ gen_debug(&dc);
+ break;
+ }
+ }
+ }
+ if (search_pc) {
+ /* Record PC/cc_op for this insn; zero-fill any op slots that
+ were emitted without an instruction start.  */
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j) {
+ gen_opc_instr_start[lj++] = 0;
+ }
+ }
+ gen_opc_pc[lj] = dc.pc;
+ gen_opc_cc_op[lj] = dc.cc_op;
+ gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
+ }
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+#if defined(S390X_DEBUG_DISAS_VERBOSE)
+ LOG_DISAS("pc " TARGET_FMT_lx "\n",
+ dc.pc);
+#endif
+ disas_s390_insn(&dc);
+
+ num_insns++;
+ if (env->singlestep_enabled) {
+ gen_debug(&dc);
+ }
+ /* Stop on a branch, a full op buffer, a page boundary, the icount
+ limit, or when single-stepping.  */
+ } while (!dc.is_jmp && gen_opc_ptr < gen_opc_end && dc.pc < next_page_start
+ && num_insns < max_insns && !env->singlestep_enabled
+ && !singlestep);
+
+ if (!dc.is_jmp) {
+ update_psw_addr(&dc);
+ }
+
+ if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
+ /* Materialize the condition code now rather than deferring it.  */
+ gen_op_calc_cc(&dc);
+ } else {
+ /* next TB starts off with CC_OP_DYNAMIC, so make sure the cc op type
+ is in env */
+ gen_op_set_cc_op(&dc);
+ }
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+ /* Generate the return instruction */
+ if (dc.is_jmp != DISAS_TB_JUMP) {
+ tcg_gen_exit_tb(0);
+ }
+ gen_icount_end(tb, num_insns);
+ *gen_opc_ptr = INDEX_op_end;
+ if (search_pc) {
+ /* Zero-fill the remaining instr_start slots for this TB.  */
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j) {
+ gen_opc_instr_start[lj++] = 0;
+ }
+ } else {
+ tb->size = dc.pc - pc_start;
+ tb->icount = num_insns;
+ }
+#if defined(S390X_DEBUG_DISAS)
+ log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(pc_start, dc.pc - pc_start, 1);
+ qemu_log("\n");
+ }
+#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
+ /* Normal translation: no per-insn PC-search metadata needed.  */
+ gen_intermediate_code_internal(env, tb, 0);
}
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
+ /* Translation with per-insn metadata recording, so that guest state
+ can be restored from a host PC (search_pc = 1).  */
+ gen_intermediate_code_internal(env, tb, 1);
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
unsigned long searched_pc, int pc_pos, void *puc)
{
+ int cc_op;
+ /* Restore the guest PC recorded for this op index at translation time. */
env->psw.addr = gen_opc_pc[pc_pos];
+ cc_op = gen_opc_cc_op[pc_pos];
+ /* Only restore cc_op when a concrete value was known at translation
+ time; for DYNAMIC/STATIC there is nothing to write back — NOTE(review):
+ this assumes env->cc_op is kept current in those states.  */
+ if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
+ env->cc_op = cc_op;
+ }
}
--
1.6.0.2
^ permalink raw reply related [flat|nested] 29+ messages in thread