From: Tristan Gingold <gingold@adacore.com>
To: qemu-devel@nongnu.org
Date: Tue, 24 Mar 2009 16:48:07 +0100
Message-Id: <1237909687-31711-26-git-send-email-gingold@adacore.com>
In-Reply-To: <1237909687-31711-25-git-send-email-gingold@adacore.com>
Subject: [Qemu-devel] [PATCH 25/25] Add full emulation for 21264.

This is still work in progress, but it is complete enough to run the firmware.
Remove the old and incomplete code for PAL virtualization.
Signed-off-by: Tristan Gingold --- cpu-exec.c | 1 + hw/alpha_palcode.c | 37 ++- target-alpha/cpu.h | 284 +++++++++++------ target-alpha/helper.c | 780 +++++++++++++++++++++++++++++----------------- target-alpha/helper.h | 6 +- target-alpha/op_helper.c | 195 ++++++++----- target-alpha/translate.c | 147 +++++---- 7 files changed, 913 insertions(+), 537 deletions(-) diff --git a/cpu-exec.c b/cpu-exec.c index cf7c1fb..674d521 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -486,6 +486,7 @@ int cpu_exec(CPUState *env1) } #elif defined(TARGET_ALPHA) if (interrupt_request & CPU_INTERRUPT_HARD) { + env->exception_index = EXCP_GEN_INTERRUPT; do_interrupt(env); next_tb = 0; } diff --git a/hw/alpha_palcode.c b/hw/alpha_palcode.c index 189033c..c778319 100644 --- a/hw/alpha_palcode.c +++ b/hw/alpha_palcode.c @@ -22,10 +22,39 @@ #include #include -#include "qemu.h" #include "cpu.h" #include "exec-all.h" +typedef struct pal_handler_t pal_handler_t; +struct pal_handler_t { + /* Reset */ + void (*reset)(CPUAlphaState *env); + /* Uncorrectable hardware error */ + void (*machine_check)(CPUAlphaState *env); + /* Arithmetic exception */ + void (*arithmetic)(CPUAlphaState *env); + /* Interrupt / correctable hardware error */ + void (*interrupt)(CPUAlphaState *env); + /* Data fault */ + void (*dfault)(CPUAlphaState *env); + /* DTB miss pal */ + void (*dtb_miss_pal)(CPUAlphaState *env); + /* DTB miss native */ + void (*dtb_miss_native)(CPUAlphaState *env); + /* Unaligned access */ + void (*unalign)(CPUAlphaState *env); + /* ITB miss */ + void (*itb_miss)(CPUAlphaState *env); + /* Instruction stream access violation */ + void (*itb_acv)(CPUAlphaState *env); + /* Reserved or privileged opcode */ + void (*opcdec)(CPUAlphaState *env); + /* Floating point exception */ + void (*fen)(CPUAlphaState *env); + /* Call pal instruction */ + void (*call_pal)(CPUAlphaState *env, uint32_t palcode); +}; + /* Shared handlers */ static void pal_reset (CPUState *env); /* Console handlers */ @@ -88,7 +117,7 @@ static void do_swappal (CPUState *env, uint64_t palid) case 0 ... 2: pal_handler = &pal_handlers[palid]; env->pal_handler = pal_handler; - env->ipr[IPR_PAL_BASE] = -1ULL; + env->pal_base = -1ULL; (*pal_handler->reset)(env); break; case 3 ... 255: @@ -98,8 +127,8 @@ static void do_swappal (CPUState *env, uint64_t palid) default: /* We were given the entry point address */ env->pal_handler = NULL; - env->ipr[IPR_PAL_BASE] = palid; - env->pc = env->ipr[IPR_PAL_BASE]; + env->pal_base = palid; + env->pc = env->pal_base; cpu_loop_exit(); } } diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h index 46d47fe..0e9b33e 100644 --- a/target-alpha/cpu.h +++ b/target-alpha/cpu.h @@ -140,6 +140,23 @@ enum { FP_ROUND_DYNAMIC = 0x3, }; +/* How palcode is interpreted. */ +enum pal_emul { + /* No pal emulation (user linux). */ + PAL_NONE, + + /* As a real-cpu palcode. */ + PAL_21264 +#if 0 + PAL_21064, + PAL_21164, + /* Palcode virtualization (TODO). 
*/ + PAL_CONSOLE, + PAL_OPENVMS, + PAL_UNIX +#endif +}; + /* Internal processor registers */ /* XXX: TOFIX: most of those registers are implementation dependant */ enum { @@ -170,7 +187,7 @@ enum { IPR_IER_CM = 0x0B, /* 21264: = CM | IER */ IPR_SIRR = 0x0C, /* 21264 */ #define IPR_SIRR_SHIFT 14 -#define IPR_SIRR_MASK 0x7fff +#define IPR_SIRR_MASK (0x7fffULL << IPR_SIRR_SHIFT) IPR_ISUM = 0x0D, /* 21264 */ IPR_HW_INT_CLR = 0x0E, /* 21264 */ IPR_EXC_SUM = 0x0F, @@ -212,8 +229,8 @@ enum { #define IPR_DTB_ALTMODE_MASK 3 IPR_DTB_IAP = 0xA2, IPR_DTB_IA = 0xA3, /* 21264 */ - IPR_DTB_IS0 = 0x24, - IPR_DTB_IS1 = 0xA4, + IPR_DTB_IS0 = 0x24, /* 21264 */ + IPR_DTB_IS1 = 0xA4, /* 21264 */ IPR_DTB_ASN0 = 0x25, /* 21264 */ IPR_DTB_ASN1 = 0xA5, /* 21264 */ #define IPR_DTB_ASN_SHIFT 56 @@ -225,74 +242,44 @@ enum { IPR_DC_STAT = 0x2A, /* 21264 */ /* Cbox IPRs */ IPR_C_DATA = 0x2B, - IPR_C_SHIFT = 0x2C, - - IPR_ASN, - IPR_ASTEN, - IPR_ASTSR, - IPR_DATFX, - IPR_ESP, - IPR_FEN, - IPR_IPIR, - IPR_IPL, - IPR_KSP, - IPR_MCES, - IPR_PERFMON, - IPR_PCBB, - IPR_PRBR, - IPR_PTBR, - IPR_SCBB, - IPR_SISR, - IPR_SSP, - IPR_SYSPTBR, - IPR_TBCHK, - IPR_TBIA, - IPR_TBIAP, - IPR_TBIS, - IPR_TBISD, - IPR_TBISI, - IPR_USP, - IPR_VIRBND, - IPR_VPTB, - IPR_WHAMI, - IPR_ALT_MODE, - IPR_LAST, + IPR_C_SHIFT = 0x2C }; typedef struct CPUAlphaState CPUAlphaState; -typedef struct pal_handler_t pal_handler_t; -struct pal_handler_t { - /* Reset */ - void (*reset)(CPUAlphaState *env); - /* Uncorrectable hardware error */ - void (*machine_check)(CPUAlphaState *env); - /* Arithmetic exception */ - void (*arithmetic)(CPUAlphaState *env); - /* Interrupt / correctable hardware error */ - void (*interrupt)(CPUAlphaState *env); - /* Data fault */ - void (*dfault)(CPUAlphaState *env); - /* DTB miss pal */ - void (*dtb_miss_pal)(CPUAlphaState *env); - /* DTB miss native */ - void (*dtb_miss_native)(CPUAlphaState *env); - /* Unaligned access */ - void (*unalign)(CPUAlphaState *env); - /* ITB miss */ - void (*itb_miss)(CPUAlphaState *env); - /* Instruction stream access violation */ - void (*itb_acv)(CPUAlphaState *env); - /* Reserved or privileged opcode */ - void (*opcdec)(CPUAlphaState *env); - /* Floating point exception */ - void (*fen)(CPUAlphaState *env); - /* Call pal instruction */ - void (*call_pal)(CPUAlphaState *env, uint32_t palcode); +struct alpha_pte { + uint32_t pa; + uint16_t fl; + uint8_t asn; +}; + +#define MAX_NBR_TLB_21264 128 +struct alpha_21264_tlb { + short int in_use; + short int next; + unsigned char spe; + struct alpha_21264_tlbe { + uint64_t va; + struct alpha_pte pte; + } entries[MAX_NBR_TLB_21264]; }; +#define ALPHA_PTE_V (1 << 0) +#define ALPHA_PTE_FOR (1 << 1) +#define ALPHA_PTE_FOW (1 << 2) +#define ALPHA_PTE_ASM (1 << 4) +#define ALPHA_PTE_GH_SHIFT 5 +#define ALPHA_PTE_KRE (1 << 8) +#define ALPHA_PTE_ERE (1 << 9) +#define ALPHA_PTE_SRE (1 << 10) +#define ALPHA_PTE_URE (1 << 11) +#define ALPHA_PTE_KWE (1 << 12) +#define ALPHA_PTE_EWE (1 << 13) +#define ALPHA_PTE_SWE (1 << 14) +#define ALPHA_PTE_UWE (1 << 15) + #if !defined(CONFIG_USER_ONLY) -#define NB_MMU_MODES 4 +#define NB_MMU_MODES 5 #else #define NB_MMU_MODES 2 #endif @@ -304,29 +291,99 @@ struct CPUAlphaState { uint64_t fpcr; uint64_t pc; uint64_t lock; - uint32_t pcc[2]; - uint64_t ipr[IPR_LAST]; - uint64_t ps; - uint64_t unique; - int saved_mode; /* Used for HW_LD / HW_ST */ - int intr_flag; /* For RC and RS */ - -#if TARGET_LONG_BITS > HOST_LONG_BITS - /* temporary fixed-point registers - * used to emulate 64 bits target on 32 bits hosts - */ - target_ulong 
t0, t1; -#endif /* Those resources are used only in Qemu core */ CPU_COMMON - uint32_t hflags; + unsigned char intr_flag; /* For RC and RS */ + unsigned char fen; /* FPU enable */ + unsigned char pal_mode; + enum pal_emul pal_emul; + unsigned char mmu_data_index; /* 0-3 */ + unsigned char mmu_code_index; /* 0-4 (pal). */ + + /* Common. */ + uint64_t pal_base; + uint64_t exc_addr; + #if defined(CONFIG_USER_ONLY) struct { uint64_t usp; uint64_t unique; } user; +#else + union { + struct { + /* Trick to emulate an Icache during early pal decompression. */ + uint64_t pal_reloc_mask; + uint64_t pal_reloc_val; + uint64_t pal_reloc_offset; + + /* Shadow registers for pal mode. */ + uint64_t shadow_r4; + uint64_t shadow_r5; + uint64_t shadow_r6; + uint64_t shadow_r7; + uint64_t shadow_r20; + uint64_t shadow_r21; + uint64_t shadow_r22; + uint64_t shadow_r23; + + /* CC */ + uint32_t cc_counter; + uint64_t cc_load_ticks; + + /* CC_CTL */ + uint64_t cc_offset; /* Only the 32 MSB are set. */ + unsigned char cc_ena; + + /* I_CTL */ + uint64_t i_vptb; + unsigned char iva_48; + unsigned char hwe; + unsigned char sde1; + unsigned char chip_id; + unsigned char ic_en; + unsigned char call_pal_r23; + + /* IER + CM */ + unsigned char cm; + uint64_t ier; + + uint64_t isum; + uint64_t ipend; /* fake. */ + + /* VA_CTL */ + uint64_t d_vptb; + unsigned char dva_48; + + /* PCTX. */ + unsigned char asn; + unsigned char astrr; + unsigned char aster; + unsigned char fpe; + unsigned char ppce; + + unsigned char altmode; + + /* SIRR */ + uint32_t sirr; + + uint64_t mm_stat; + uint64_t iva_form; + + uint64_t va_form; + uint64_t va; + + uint64_t itb_tag; + uint64_t itb_pte; + uint64_t dtb_tag; + uint64_t dtb_pte; + unsigned char dtb_asn; + + struct alpha_21264_tlb itlb, dtlb; + } a21264; + }; #endif int error_code; @@ -334,7 +391,6 @@ struct CPUAlphaState { uint32_t features; uint32_t amask; int implver; - pal_handler_t *pal_handler; }; #define cpu_init cpu_alpha_init @@ -389,18 +445,33 @@ enum { }; enum { - EXCP_RESET = 0x0000, - EXCP_MCHK = 0x0020, - EXCP_ARITH = 0x0060, - EXCP_HW_INTERRUPT = 0x00E0, - EXCP_DFAULT = 0x01E0, - EXCP_DTB_MISS_PAL = 0x09E0, - EXCP_ITB_MISS = 0x03E0, - EXCP_ITB_ACV = 0x07E0, - EXCP_DTB_MISS_NATIVE = 0x08E0, - EXCP_UNALIGN = 0x11E0, - EXCP_OPCDEC = 0x13E0, - EXCP_FEN = 0x17E0, + EXCP_21064_RESET = 0x0000, + EXCP_21064_MCHK = 0x0020, + EXCP_21064_ARITH = 0x0060, + EXCP_21064_HW_INTERRUPT = 0x00E0, + EXCP_21064_DFAULT = 0x01E0, + EXCP_21064_DTB_MISS_PAL = 0x09E0, + EXCP_21064_ITB_MISS = 0x03E0, + EXCP_21064_ITB_ACV = 0x07E0, + EXCP_21064_DTB_MISS_NATIVE = 0x08E0, + EXCP_21064_UNALIGN = 0x11E0, + EXCP_21064_OPCDEC = 0x13E0, + EXCP_21064_FEN = 0x17E0, + + EXCP_21264_DTBM_DOUBLE_3 = 0x0100, + EXCP_21264_DTBM_DOUBLE_4 = 0x0180, + EXCP_21264_FEN = 0x0200, + EXCP_21264_UNALIGN = 0x0280, + EXCP_21264_DTBM_SINGLE = 0x0300, + EXCP_21264_DFAULT = 0x0380, + EXCP_21264_OPCDEC = 0x0400, + EXCP_21264_IACV = 0x0480, + EXCP_21264_MCHK = 0x0500, + EXCP_21264_ITB_MISS = 0x0580, + EXCP_21264_ARITH = 0x0600, + EXCP_21264_INTERRUPT = 0x0680, + EXCP_21264_MT_FPCR = 0x0700, + EXCP_21264_RESET = 0x0780, /* Generic exception - to be mapped to processor. 
*/ EXCP_GEN_OPCDEC = 1, @@ -474,11 +545,21 @@ int cpu_alpha_handle_mmu_fault (CPUState *env, uint64_t address, int rw, int mmu_idx, int is_softmmu); void do_interrupt (CPUState *env); -int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp); -int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp); -#if !defined (CONFIG_USER_ONLY) -void call_pal (CPUState *env); -#endif +uint64_t cpu_alpha_mfpr_21264 (CPUState *env, int iprn); +void cpu_alpha_mtpr_21264 (CPUState *env, int iprn, uint64_t val); +void init_cpu_21264(CPUState *env); +void swap_shadow_21264(CPUState *env); +struct alpha_pte cpu_alpha_mmu_v2p_21264(CPUState *env, int64_t address, + int rwx); +void cpu_alpha_mmu_dfault_21264(CPUState *env, int64_t address, int opc); +int cpu_alpha_mmu_fault_21264 (CPUState *env, int64_t address, int rwx, + int mmu_idx, void *retaddr); + +void cpu_alpha_mmu_fault_pal(CPUState *env, int64_t address); + +void alpha_21264_srm_write(CPUState *env); + +void cpu_alpha_update_irq (CPUState *env, int irqs); static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb) { @@ -490,7 +571,16 @@ static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc, { *pc = env->pc; *cs_base = 0; - *flags = env->ps; + *flags = (env->mmu_code_index << 2) | env->mmu_data_index; } +/* Flags for virt_to_phys helper. */ +#define ALPHA_HW_MMUIDX_MASK 3 +#define ALPHA_HW_V (1 << 2) +#define ALPHA_HW_W (1 << 3) +#define ALPHA_HW_E (1 << 4) +#define ALPHA_HW_A (1 << 8) +#define ALPHA_HW_L (1 << 9) +#define ALPHA_HW_Q (1 << 10) + #endif /* !defined (__CPU_ALPHA_H__) */ diff --git a/target-alpha/helper.c b/target-alpha/helper.c index 4046463..4c5a0a4 100644 --- a/target-alpha/helper.c +++ b/target-alpha/helper.c @@ -23,6 +23,7 @@ #include #include "cpu.h" +#include "sysemu.h" #include "exec-all.h" #if defined(CONFIG_USER_ONLY) @@ -34,7 +35,7 @@ int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw, env->exception_index = EXCP_USER_ITB_MISS; else env->exception_index = EXCP_USER_DFAULT; - env->ipr[IPR_EXC_ADDR] = address; + env->exc_addr = address; return 1; } @@ -56,354 +57,548 @@ target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr) return -1; } -int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw, - int mmu_idx, int is_softmmu) +#define GH_MASK(gh) (~((1ULL << (13 + gh)) - 1)) +#define TB_PTE_GET_GH(v) (((v) >> 5) & 3) +#define TB_PTE_GET_RE(v) (((v) >> 8) & 0x0f) +#define TB_PTE_GET_WE(v) (((v) >> 12) & 0x0f) +#define TB_PTE_GET_ASM(v) (((v) >> 4) & 1) +#define TB_PTE_GET_FO(v) (((v) >> 1) & 2) +#define IPR_CM_GET_CM(v) (((v) >> 3) & 3) + +static uint64_t va_form (int64_t va, uint64_t vptb, int form) { - uint32_t opc; + va = (va >> 13) << 3; + switch (form) { + case 0: /* VA_48 = 0, VA_FORM_32 = 0 */ + return (vptb & 0xfffffffe00000000ULL) + | (va & 0x00000001fffffff8ULL); + case 1: /* VA_48 = 1, VA_FORM_32 = 0 */ + return (vptb & 0xfffff80000000000ULL) + | (((va << 26) >> 26) & 0x000007fffffffff8ULL); + case 2: /* VA_48 = 0, VA_FORM_32 = 1 */ + return (vptb & 0xffffffffc0000000ULL) + | (va & 0x00000000003ffff8ULL); + default: + abort(); + } +} - if (rw == 2) { +struct alpha_pte cpu_alpha_mmu_v2p_21264(CPUState *env, int64_t address, + int rwx) +{ + struct alpha_21264_tlb *tlb; + struct alpha_pte pte; + int i; + int va_sh; + + if (rwx == 2) { /* Instruction translation buffer miss */ - env->exception_index = EXCP_ITB_MISS; + tlb = &env->a21264.itlb; + va_sh = env->a21264.iva_48 ? 
64 - 48 : 64 - 43; } else { - if (env->ipr[IPR_EXC_ADDR] & 1) - env->exception_index = EXCP_DTB_MISS_PAL; + /* Data translation buffer miss */ + tlb = &env->a21264.dtlb; + va_sh = env->a21264.dva_48 ? 64 - 48 : 64 - 43; + } + + /* Check sign extension. */ + if (((address << va_sh) >> va_sh) != address) + return ((struct alpha_pte){0, 0, 1}); + +#if 0 + fprintf(stderr, "mmu_fault_21264: addr=%016llx, rwx=%d\n", address, rwx); +#endif + + /* Super page. */ + if ((tlb->spe & 4) && ((address >> 46) & 3) == 2) { + pte.pa = (address & 0x000008ffffffe000ULL) >> 13; + pte.fl = ALPHA_PTE_KRE | ALPHA_PTE_KWE | ALPHA_PTE_V; + pte.asn = 0; + return pte; + } + else if ((tlb->spe & 2) && ((address >> 41) & 0x7f) == 0x7e) { + pte.pa = (((address << 23) >> 23) & 0x000008ffffffe000ULL) >> 13; + pte.fl = ALPHA_PTE_KRE | ALPHA_PTE_KWE | ALPHA_PTE_V; + pte.asn = 0; + return pte; + } + if ((tlb->spe & 1) && ((address >> 30) & 0x3ffff) == 0x3fffe) { + pte.pa = (address & 0x000000003fffe000ULL) >> 13; + pte.fl = ALPHA_PTE_KRE | ALPHA_PTE_KWE | ALPHA_PTE_V; + pte.asn = 0; + return pte; + } + + /* Search in TLB. */ + for (i = 0; i < MAX_NBR_TLB_21264; i++) { + struct alpha_21264_tlbe *tlbe = &tlb->entries[i]; + + if ((tlbe->pte.fl & ALPHA_PTE_V) + && tlbe->va == (address & TARGET_PAGE_MASK) + && ((tlbe->pte.asn == env->a21264.asn) + || (tlbe->pte.fl & ALPHA_PTE_ASM))) { + return tlbe->pte; + } + } + + return ((struct alpha_pte){0, 0, 0}); +} + +void cpu_alpha_mmu_fault_pal(CPUState *env, int64_t address) +{ + target_ulong phys_addr = address & TARGET_PAGE_MASK; + + if ((address & env->a21264.pal_reloc_mask) + == env->a21264.pal_reloc_val) + phys_addr += env->a21264.pal_reloc_offset; + + tlb_set_page_exec(env, address & TARGET_PAGE_MASK, phys_addr, + PAGE_EXEC, MMU_PAL_IDX, 1); +} + +void cpu_alpha_mmu_dfault_21264(CPUState *env, int64_t address, int opc) +{ + env->a21264.mm_stat = (opc << 4) | ((opc & 0x04) != 0 ? 1 : 0); + + env->a21264.va = address; + env->a21264.va_form = + va_form(address, env->a21264.d_vptb, env->a21264.dva_48); +} + +int cpu_alpha_mmu_fault_21264(CPUState *env, int64_t address, int rwx, + int mmu_idx, void *retaddr) +{ + struct alpha_pte pte; + int rights; + + pte = cpu_alpha_mmu_v2p_21264(env, address, rwx); + + rights = (pte.fl >> env->a21264.cm); + if ((pte.fl & ALPHA_PTE_V) + && (rights & (rwx == 1 ? ALPHA_PTE_KWE : ALPHA_PTE_KRE)) + && (rwx == 2 || !((pte.fl >> rwx) & ALPHA_PTE_FOR))) { + tlb_set_page_exec + (env, address & TARGET_PAGE_MASK, ((uint64_t)pte.pa) << 13, + (rwx == 2) ? PAGE_READ | PAGE_EXEC + : ((rights & ALPHA_PTE_KWE ? PAGE_WRITE : 0) + | (rights & ALPHA_PTE_KRE ? PAGE_READ : 0)), + mmu_idx, 1); + return 0; + } + + /* Not found. */ + if (rwx == 2) { + env->exception_index = + (pte.fl == 0 && pte.asn == 0) ? EXCP_21264_ITB_MISS + : EXCP_21264_IACV; + + env->a21264.iva_form = + va_form(address, env->a21264.i_vptb, env->a21264.iva_48); + if (retaddr) + abort(); + } else { + TranslationBlock *tb; + unsigned long pc; + uint64_t phys_pc; + uint32_t insn; + + if (pte.fl == 0 && pte.asn == 0) + env->exception_index = EXCP_21264_DTBM_SINGLE; else - env->exception_index = EXCP_DTB_MISS_NATIVE; - opc = (ldl_code(env->pc) >> 21) << 4; - if (rw) { - opc |= 0x9; - } else { - opc |= 0x4; + env->exception_index = EXCP_21264_DFAULT; + + /* In order to correctly set mm_stat and find the right exception, + we must find which instruction created the fault. */ + + /* This code can only be called from a tb (but the debugger!). + FIXME: remove the abort() when the code is correct. 
*/ + if (!likely(retaddr)) + abort(); + + /* now we have a real cpu fault */ + pc = (unsigned long)retaddr; + tb = tb_find_pc(pc); + if (!likely(tb)) { + /* Not from translated code!! Not possible. */ + abort(); } - env->ipr[IPR_MM_STAT] = opc; + + /* the PC is inside the translated code. It means that we + have a virtual CPU fault */ + cpu_restore_state(tb, env, pc, NULL); + + /* Extract physical pc address. */ + /* FIXME: justify why page_addr[1] is not needed. */ + phys_pc = tb->page_addr[0] + (env->pc & ~TARGET_PAGE_MASK); + + /* Extract instruction. */ + insn = ldl_phys(phys_pc); + + cpu_alpha_mmu_dfault_21264(env, address, insn >> 26); } return 1; } -int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp) +static void insert_itlb_21264(CPUState *env, int64_t va, uint64_t pte) { - uint64_t hwpcb; - int ret = 0; + struct alpha_21264_tlb *tlb = &env->a21264.itlb; + struct alpha_21264_tlbe *e = &tlb->entries[tlb->next]; + + /* Should an already matching entry be discarded ? Not sure. */ + e->va = ((va & TARGET_PAGE_MASK) << 16) >> 16; + e->pte.pa = pte >> 13; + e->pte.fl = (pte & 0x1fff) | ALPHA_PTE_V; + e->pte.asn = env->a21264.asn; - hwpcb = env->ipr[IPR_PCBB]; + tlb->next = (tlb->next + 1) % MAX_NBR_TLB_21264; +} + +static void insert_dtlb_21264(CPUState *env, int64_t va, uint64_t pte) +{ + struct alpha_21264_tlb *tlb = &env->a21264.dtlb; + struct alpha_21264_tlbe *e = &tlb->entries[tlb->next]; + + /* Should an already matching entry be discarded ? Not sure. */ + e->va = ((va & TARGET_PAGE_MASK) << 16) >> 16; + e->pte.pa = pte >> 32; + e->pte.asn = env->a21264.asn; + e->pte.fl = pte | ALPHA_PTE_V; + tlb->next = (tlb->next + 1) % MAX_NBR_TLB_21264; +} + +static void flush_tlb_21264(CPUState *env, struct alpha_21264_tlb *tlb) +{ + int i; + + for (i = 0; i < MAX_NBR_TLB_21264; i++) { + struct alpha_21264_tlbe *e = &tlb->entries[i]; + if (!(e->pte.fl & ALPHA_PTE_ASM) + && e->pte.asn == env->a21264.asn) + e->pte.fl = 0; + } +} + +static void flush_tlb_21264_page(CPUState *env, struct alpha_21264_tlb *tlb, + uint64_t addr) +{ + int i; + + for (i = 0; i < MAX_NBR_TLB_21264; i++) { + struct alpha_21264_tlbe *e = &tlb->entries[i]; + if (!(e->pte.fl & ALPHA_PTE_ASM) + && e->pte.asn == env->a21264.asn + && e->va == addr) + e->pte.fl = 0; + } +} + +uint64_t cpu_alpha_mfpr_21264 (CPUState *env, int iprn) +{ switch (iprn) { - case IPR_ASN: - if (env->features & FEATURE_ASN) - *valp = env->ipr[IPR_ASN]; - else - *valp = 0; - break; - case IPR_ASTEN: - *valp = ((int64_t)(env->ipr[IPR_ASTEN] << 60)) >> 60; - break; - case IPR_ASTSR: - *valp = ((int64_t)(env->ipr[IPR_ASTSR] << 60)) >> 60; - break; - case IPR_DATFX: - /* Write only */ - ret = -1; - break; - case IPR_ESP: - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_ESP]; - else - *valp = ldq_raw(hwpcb + 8); - break; - case IPR_FEN: - *valp = ((int64_t)(env->ipr[IPR_FEN] << 63)) >> 63; - break; - case IPR_IPIR: - /* Write-only */ - ret = -1; - break; - case IPR_IPL: - *valp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59; + case IPR_PAL_BASE: + return env->pal_base; + case IPR_I_CTL: + return env->a21264.i_vptb + | (env->a21264.chip_id << IPR_I_CTL_CHIP_ID_SHIFT) + | (env->a21264.iva_48 << IPR_I_CTL_VA_48_SHIFT) + | (env->a21264.hwe << IPR_I_CTL_HWE_SHIFT) + | (env->a21264.sde1 << IPR_I_CTL_SDE1_SHIFT) + | (env->a21264.ic_en << IPR_I_CTL_IC_EN_SHIFT) + | (env->a21264.call_pal_r23 << IPR_I_CTL_CALL_PAL_R23_SHIFT) + | (env->a21264.itlb.spe << IPR_I_CTL_SPE_SHIFT); + case IPR_IVA_FORM: + return env->a21264.iva_form; + case IPR_VA: + return 
env->a21264.va; + case IPR_EXC_ADDR: + return env->exc_addr; + case IPR_I_STAT: /* Not emulated */ + case IPR_DC_STAT: /* Not emulated */ + return 0; + case IPR_C_DATA: + case IPR_C_SHIFT: + return 0; + case IPR_PCTX ... IPR_PCTX_ALL: + return (((uint64_t)env->a21264.asn) << IPR_PCTX_ASN_SHIFT) + | (((uint64_t)env->a21264.astrr) << IPR_PCTX_ASTRR_SHIFT) + | (((uint64_t)env->a21264.aster) << IPR_PCTX_ASTER_SHIFT) + | (((uint64_t)env->a21264.fpe) << IPR_PCTX_FPE_SHIFT) + | (((uint64_t)env->a21264.ppce) << IPR_PCTX_PPCE_SHIFT); + case IPR_IER_CM: + return (((uint64_t)env->a21264.cm) << IPR_CM_SHIFT) + | env->a21264.ier; + case IPR_ISUM: + return env->a21264.isum; + case IPR_SIRR: + return env->a21264.sirr; + case IPR_MM_STAT: + return env->a21264.mm_stat; + case IPR_VA_FORM: + return env->a21264.va_form; + case IPR_EXC_SUM: + return 0; /* FIXME. */ + default: + cpu_abort(env, "cpu_alpha_mfpr_21264: ipr 0x%x not handled\n", iprn); + } +} + +void cpu_alpha_mtpr_21264 (CPUState *env, int iprn, uint64_t val) +{ +#if 0 + qemu_log("cpu_alpha_mtpr_21264: ipr=0x%02x, val="TARGET_FMT_lx"\n", + iprn, val); +#endif + switch (iprn) { + case IPR_CC: + env->a21264.cc_offset = (val >> 32) << 32; + break; + case IPR_CC_CTL: + env->a21264.cc_ena = (val >> IPR_CC_CTL_ENA_SHIFT) & 1; + env->a21264.cc_counter = val & IPR_CC_CTL_COUNTER_MASK; + env->a21264.cc_load_ticks = cpu_get_ticks(); + break; + case IPR_ITB_TAG: + env->a21264.itb_tag = val & 0x0000ffffffffe000ULL; + break; + case IPR_DTB_TAG0: + env->a21264.dtb_tag = val & 0x0000ffffffffe000ULL; + break; + case IPR_DTB_TAG1: + case IPR_DTB_ASN1: + case IPR_DTB_PTE1: + break; /* DTAG */ + case IPR_ITB_PTE: + env->a21264.itb_pte = val & 0x00000fffffffef70ULL; + insert_itlb_21264(env, env->a21264.itb_tag, env->a21264.itb_pte); + break; + case IPR_DTB_PTE0: + env->a21264.dtb_pte = val &= 0x7fffffff0000ffe6ULL; + insert_dtlb_21264(env, env->a21264.dtb_tag, env->a21264.dtb_pte); + break; + case IPR_DTB_ASN0: + env->a21264.dtb_asn = (val >> IPR_DTB_ASN_SHIFT) & 0xff; + break; + case IPR_PAL_BASE: + env->pal_base = val & 0x00000fffffff8000ULL; + break; + case IPR_I_CTL: + { + unsigned char old_sde1 = env->a21264.sde1; + env->a21264.i_vptb = + ((((int64_t)val) << 16) >> 16) & 0xffffffffc0000000ULL; + if ((val >> IPR_I_CTL_HWE_SHIFT) & 1) + cpu_abort (env, "mtpr i_ctl: hwe not yet handled\n"); + env->a21264.sde1 = (val >> IPR_I_CTL_SDE1_SHIFT) & 1; + env->a21264.iva_48 = (val >> IPR_I_CTL_VA_48_SHIFT) & 3; + env->a21264.itlb.spe = (val >> IPR_I_CTL_SPE_SHIFT) & 7; + env->a21264.call_pal_r23 = (val >> IPR_I_CTL_CALL_PAL_R23_SHIFT) & 1; + if (env->pal_mode && old_sde1 != env->a21264.sde1) + swap_shadow_21264(env); break; - case IPR_KSP: - if (!(env->ipr[IPR_EXC_ADDR] & 1)) { - ret = -1; - } else { - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_KSP]; - else - *valp = ldq_raw(hwpcb + 0); + } + case IPR_VA_CTL: + env->a21264.d_vptb = val & 0xffffffffc0000000ULL; + env->a21264.dva_48 = (val >> IPR_VA_CTL_VA_48_SHIFT) & 3; + /* env->a21264.b_endian = val & 1; */ + if (val & 1) + cpu_abort (env, "mtpr va_ctl: b_endian not yet handled\n"); + break; + case IPR_IER_CM: + case IPR_CM: + case IPR_IER: + if (iprn & 2) { + env->a21264.ier = val & IPR_IER_MASK; + env->a21264.isum = env->a21264.ipend & env->a21264.ier; + } + if (iprn & 1) { + env->a21264.cm = (val & IPR_CM_MASK) >> IPR_CM_SHIFT; + env->mmu_data_index = env->a21264.cm; } break; - case IPR_MCES: - *valp = ((int64_t)(env->ipr[IPR_MCES] << 59)) >> 59; - break; - case IPR_PERFMON: - /* Implementation specific */ 
- *valp = 0; + case IPR_IC_FLUSH: + case IPR_IC_FLUSH_ASM: + // tb_flush(env); break; - case IPR_PCBB: - *valp = ((int64_t)env->ipr[IPR_PCBB] << 16) >> 16; + case IPR_ITB_IA: + tlb_flush(env, 1); + flush_tlb_21264(env, &env->a21264.itlb); break; - case IPR_PRBR: - *valp = env->ipr[IPR_PRBR]; + case IPR_DTB_IA: + tlb_flush(env, 1); + flush_tlb_21264(env, &env->a21264.dtlb); break; - case IPR_PTBR: - *valp = env->ipr[IPR_PTBR]; + case IPR_DTB_IS0: + val &= TARGET_PAGE_MASK; + tlb_flush_page(env, val); + flush_tlb_21264_page(env, &env->a21264.dtlb, val); break; - case IPR_SCBB: - *valp = (int64_t)((int32_t)env->ipr[IPR_SCBB]); + case IPR_DTB_IS1: break; - case IPR_SIRR: - /* Write-only */ - ret = -1; + case IPR_I_STAT: /* Not emulated */ + case IPR_DC_STAT: /* Not emulated */ break; - case IPR_SISR: - *valp = (int64_t)((int16_t)env->ipr[IPR_SISR]); - case IPR_SSP: - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_SSP]; - else - *valp = ldq_raw(hwpcb + 16); + case IPR_MM_STAT: /* RO */ break; - case IPR_SYSPTBR: - if (env->features & FEATURE_VIRBND) - *valp = env->ipr[IPR_SYSPTBR]; - else - ret = -1; - break; - case IPR_TBCHK: - if ((env->features & FEATURE_TBCHK)) { - /* XXX: TODO */ - *valp = 0; - ret = -1; - } else { - ret = -1; + case IPR_PCTX ... IPR_PCTX_ALL: + if (iprn & IPR_PCTX_ASN) { + env->a21264.asn = (val >> IPR_PCTX_ASN_SHIFT) & 0xff; + tlb_flush(env, 1); + tb_flush(env); } + if (iprn & IPR_PCTX_ASTRR) { + env->a21264.astrr = (val >> IPR_PCTX_ASTRR_SHIFT) & 0xf; + if (env->a21264.astrr) + cpu_abort(env, "set pctx.astrr unhandled"); + } + if (iprn & IPR_PCTX_ASTER) { + env->a21264.aster = (val >> IPR_PCTX_ASTER_SHIFT) & 0xf; + if (env->a21264.aster) + cpu_abort(env, "set pctx.aster unhandled"); + } + if (iprn & IPR_PCTX_FPE) { + env->a21264.fpe = (val >> IPR_PCTX_FPE_SHIFT) & 1; + if (!env->a21264.fpe) + cpu_abort(env, "set pctx.fpe unhandled"); + } + if (iprn & IPR_PCTX_PPCE) + env->a21264.ppce = (val >> IPR_PCTX_PPCE_SHIFT) & 1; break; - case IPR_TBIA: - /* Write-only */ - ret = -1; - break; - case IPR_TBIAP: - /* Write-only */ - ret = -1; - break; - case IPR_TBIS: - /* Write-only */ - ret = -1; + case IPR_M_CTL: + env->a21264.dtlb.spe = + (val >> IPR_M_CTL_SPE_SHIFT) & IPR_M_CTL_SPE_MASK; break; - case IPR_TBISD: - /* Write-only */ - ret = -1; + case IPR_SIRR: + env->a21264.sirr = val & IPR_SIRR_MASK; + env->a21264.ipend = ((env->a21264.ipend & ~IPR_SIRR_MASK) + | env->a21264.sirr); + env->a21264.isum = env->a21264.ipend & env->a21264.ier; break; - case IPR_TBISI: - /* Write-only */ - ret = -1; + case IPR_HW_INT_CLR: break; - case IPR_USP: - if (env->features & FEATURE_SPS) - *valp = env->ipr[IPR_USP]; - else - *valp = ldq_raw(hwpcb + 24); + case IPR_DTB_ALTMODE0: + env->a21264.altmode = val & IPR_DTB_ALTMODE_MASK; break; - case IPR_VIRBND: - if (env->features & FEATURE_VIRBND) - *valp = env->ipr[IPR_VIRBND]; - else - ret = -1; + case IPR_PCTR_CTL: + /* Not emulated. */ break; - case IPR_VPTB: - *valp = env->ipr[IPR_VPTB]; + case IPR_C_DATA: + case IPR_C_SHIFT: break; - case IPR_WHAMI: - *valp = env->ipr[IPR_WHAMI]; + case 0x2d: /* Not documented (M_FIX) */ + /* Hack: save srm. 
*/ + if (env->a21264.pal_reloc_val) + alpha_21264_srm_write(env); break; default: - /* Invalid */ - ret = -1; - break; + cpu_abort(env, "cpu_alpha_mtpr_21264: ipr 0x%x not handled\n", iprn); } +} - return ret; +void init_cpu_21264(CPUState *env) +{ + env->pal_base = 0; + env->a21264.chip_id = 0x21; + env->a21264.ic_en = 3; + env->pal_emul = PAL_21264; + memset (&env->a21264.itlb, 0, sizeof (env->a21264.itlb)); + memset (&env->a21264.dtlb, 0, sizeof (env->a21264.dtlb)); } -int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp) +void swap_shadow_21264(CPUState *env) { - uint64_t hwpcb, tmp64; - uint8_t tmp8; - int ret = 0; +#define swap(a, b) do { uint64_t t = a; a = b; b = t; } while (0) + swap(env->a21264.shadow_r4, env->ir[4]); + swap(env->a21264.shadow_r5, env->ir[5]); + swap(env->a21264.shadow_r6, env->ir[6]); + swap(env->a21264.shadow_r7, env->ir[7]); + swap(env->a21264.shadow_r20, env->ir[20]); + swap(env->a21264.shadow_r21, env->ir[21]); + swap(env->a21264.shadow_r22, env->ir[22]); + swap(env->a21264.shadow_r23, env->ir[23]); +#undef swap +} - hwpcb = env->ipr[IPR_PCBB]; - switch (iprn) { - case IPR_ASN: - /* Read-only */ - ret = -1; - break; - case IPR_ASTEN: - tmp8 = ((int8_t)(env->ipr[IPR_ASTEN] << 4)) >> 4; - *oldvalp = tmp8; - tmp8 &= val & 0xF; - tmp8 |= (val >> 4) & 0xF; - env->ipr[IPR_ASTEN] &= ~0xF; - env->ipr[IPR_ASTEN] |= tmp8; - ret = 1; - break; - case IPR_ASTSR: - tmp8 = ((int8_t)(env->ipr[IPR_ASTSR] << 4)) >> 4; - *oldvalp = tmp8; - tmp8 &= val & 0xF; - tmp8 |= (val >> 4) & 0xF; - env->ipr[IPR_ASTSR] &= ~0xF; - env->ipr[IPR_ASTSR] |= tmp8; - ret = 1; - case IPR_DATFX: - env->ipr[IPR_DATFX] &= ~0x1; - env->ipr[IPR_DATFX] |= val & 1; - tmp64 = ldq_raw(hwpcb + 56); - tmp64 &= ~0x8000000000000000ULL; - tmp64 |= (val & 1) << 63; - stq_raw(hwpcb + 56, tmp64); - break; - case IPR_ESP: - if (env->features & FEATURE_SPS) - env->ipr[IPR_ESP] = val; - else - stq_raw(hwpcb + 8, val); - break; - case IPR_FEN: - env->ipr[IPR_FEN] = val & 1; - tmp64 = ldq_raw(hwpcb + 56); - tmp64 &= ~1; - tmp64 |= val & 1; - stq_raw(hwpcb + 56, tmp64); - break; - case IPR_IPIR: - /* XXX: TODO: Send IRQ to CPU #ir[16] */ - break; - case IPR_IPL: - *oldvalp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59; - env->ipr[IPR_IPL] &= ~0x1F; - env->ipr[IPR_IPL] |= val & 0x1F; - /* XXX: may issue an interrupt or ASR _now_ */ - ret = 1; - break; - case IPR_KSP: - if (!(env->ipr[IPR_EXC_ADDR] & 1)) { - ret = -1; - } else { - if (env->features & FEATURE_SPS) - env->ipr[IPR_KSP] = val; - else - stq_raw(hwpcb + 0, val); - } - break; - case IPR_MCES: - env->ipr[IPR_MCES] &= ~((val & 0x7) | 0x18); - env->ipr[IPR_MCES] |= val & 0x18; - break; - case IPR_PERFMON: - /* Implementation specific */ - *oldvalp = 0; - ret = 1; - break; - case IPR_PCBB: - /* Read-only */ - ret = -1; - break; - case IPR_PRBR: - env->ipr[IPR_PRBR] = val; - break; - case IPR_PTBR: - /* Read-only */ - ret = -1; - break; - case IPR_SCBB: - env->ipr[IPR_SCBB] = (uint32_t)val; - break; - case IPR_SIRR: - if (val & 0xF) { - env->ipr[IPR_SISR] |= 1 << (val & 0xF); - /* XXX: request a software interrupt _now_ */ - } - break; - case IPR_SISR: - /* Read-only */ - ret = -1; - break; - case IPR_SSP: - if (env->features & FEATURE_SPS) - env->ipr[IPR_SSP] = val; - else - stq_raw(hwpcb + 16, val); - break; - case IPR_SYSPTBR: - if (env->features & FEATURE_VIRBND) - env->ipr[IPR_SYSPTBR] = val; - else - ret = -1; - case IPR_TBCHK: - /* Read-only */ - ret = -1; - break; - case IPR_TBIA: - tlb_flush(env, 1); - break; - case IPR_TBIAP: - 
tlb_flush(env, 1); - break; - case IPR_TBIS: - tlb_flush_page(env, val); - break; - case IPR_TBISD: - tlb_flush_page(env, val); - break; - case IPR_TBISI: - tlb_flush_page(env, val); - break; - case IPR_USP: - if (env->features & FEATURE_SPS) - env->ipr[IPR_USP] = val; - else - stq_raw(hwpcb + 24, val); - break; - case IPR_VIRBND: - if (env->features & FEATURE_VIRBND) - env->ipr[IPR_VIRBND] = val; - else - ret = -1; - break; - case IPR_VPTB: - env->ipr[IPR_VPTB] = val; - break; - case IPR_WHAMI: - /* Read-only */ - ret = -1; +void cpu_alpha_update_irq (CPUState *env, int irqs) +{ + switch (env->pal_emul) { + case PAL_21264: + env->a21264.ipend &= ~(0x3fULL << 33); + env->a21264.ipend |= ((uint64_t)irqs) << 33; + env->a21264.isum = env->a21264.ipend & env->a21264.ier; + if (env->a21264.isum && !env->pal_mode) + cpu_interrupt(env, CPU_INTERRUPT_HARD); break; default: - /* Invalid */ - ret = -1; + abort(); break; } - - return ret; } void do_interrupt (CPUState *env) { int excp; - env->ipr[IPR_EXC_ADDR] = env->pc | 1; + if (env->pal_mode && env->exception_index == EXCP_GEN_INTERRUPT) { + /* Can this happen ? Maybe if the basic block finishes with a + palcall. */ + cpu_abort(env, "do_interrupt: pal_mode=1\n"); + } + + env->exc_addr = env->pc | env->pal_mode; excp = env->exception_index; env->exception_index = 0; env->error_code = 0; - /* XXX: disable interrupts and memory mapping */ - if (env->ipr[IPR_PAL_BASE] != -1ULL) { + env->pal_mode = 1; + env->mmu_code_index = MMU_PAL_IDX; + + /* Generic exception translation. */ + if (excp <= EXCP_GEN_LAST) { + switch (env->pal_emul) { + case PAL_21264: + switch (excp) { + case EXCP_GEN_OPCDEC: excp = EXCP_21264_OPCDEC; break; + case EXCP_GEN_ARITH: excp = EXCP_21264_ARITH; break; + case EXCP_GEN_INTERRUPT: excp = EXCP_21264_INTERRUPT; break; + default: + abort(); + } + break; + case PAL_NONE: + cpu_abort(env, "do_interrupt: pal emul not supported\n"); + } + } + + switch (env->pal_emul) { + case PAL_21264: + if (env->a21264.sde1 && !(env->exc_addr & 1)) + swap_shadow_21264(env); + if ((excp & EXCP_CALL_PALP) && env->a21264.call_pal_r23) + env->ir[23] = env->pc; + if (excp == EXCP_21264_INTERRUPT) + cpu_reset_interrupt(env, CPU_INTERRUPT_HARD); + break; + default: + break; + } + + + /* We use native PALcode */ + env->pc = env->pal_base + excp; + +#if 0 + if (env->pal_base != -1ULL) { /* We use native PALcode */ - env->pc = env->ipr[IPR_PAL_BASE] + excp; + env->pc = env->pal_base + excp; } else { /* We use emulated PALcode */ call_pal(env); /* Emulate REI */ - env->pc = env->ipr[IPR_EXC_ADDR] & ~7; - env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1; + env->pc = env->exc_addr & ~3ULL; + /* env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1; */ /* XXX: re-enable interrupts and memory mapping */ } +#endif } #endif @@ -419,8 +614,8 @@ void cpu_dump_state (CPUState *env, FILE *f, }; int i; - cpu_fprintf(f, " PC " TARGET_FMT_lx " PS " TARGET_FMT_lx "\n", - env->pc, env->ps); + cpu_fprintf(f, " PC " TARGET_FMT_lx " pal=%d\n", + env->pc, env->pal_mode); for (i = 0; i < 31; i++) { cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i, linux_reg_names[i], env->ir[i]); @@ -434,5 +629,6 @@ void cpu_dump_state (CPUState *env, FILE *f, if ((i % 3) == 2) cpu_fprintf(f, "\n"); } - cpu_fprintf(f, "\nlock " TARGET_FMT_lx "\n", env->lock); + cpu_fprintf(f, "\n"); + /* cpu_fprintf(f, "\nlock " TARGET_FMT_lx "\n", env->lock); */ } diff --git a/target-alpha/helper.h b/target-alpha/helper.h index 7c7278f..eaab3d0 100644 --- a/target-alpha/helper.h +++ b/target-alpha/helper.h @@ 
-111,11 +111,9 @@ DEF_HELPER_0(hw_rei, void) DEF_HELPER_1(hw_ret, void, i64) DEF_HELPER_2(mfpr, i64, int, i64) DEF_HELPER_2(mtpr, void, int, i64) -DEF_HELPER_0(set_alt_mode, void) -DEF_HELPER_0(restore_mode, void) -DEF_HELPER_1(ld_virt_to_phys, i64, i64) -DEF_HELPER_1(st_virt_to_phys, i64, i64) +DEF_HELPER_2(21264_hw_ldq, i64, i64, i32) +DEF_HELPER_2(21264_hw_ldl, i64, i64, i32) DEF_HELPER_1(ldl_phys, i64, i64) DEF_HELPER_1(ldq_phys, i64, i64) DEF_HELPER_1(ldl_l_phys, i64, i64) diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c index 9e9289f..19b2464 100644 --- a/target-alpha/op_helper.c +++ b/target-alpha/op_helper.c @@ -27,6 +27,8 @@ #include "softmmu_exec.h" #endif /* !defined(CONFIG_USER_ONLY) */ +extern uint64_t cpu_get_ticks(void); + void helper_tb_flush (void) { tb_flush(env); @@ -58,8 +60,22 @@ uint64_t helper_amask (uint64_t arg) uint64_t helper_load_pcc (void) { - /* XXX: TODO */ +#ifdef CONFIG_USER_ONLY + /* FIXME */ return 0; +#else + uint32_t res; + + switch (env->pal_emul) { + case PAL_21264: + res = env->a21264.cc_counter; + if (env->a21264.cc_ena) + res += (cpu_get_ticks() - env->a21264.cc_load_ticks) >> 3; + return res | env->a21264.cc_offset; + default: + cpu_abort(env,"load_ppc: bad pal emul\n"); + } +#endif } uint64_t helper_load_implver (void) @@ -145,7 +161,7 @@ uint64_t helper_addqv (uint64_t op1, uint64_t op2) uint64_t tmp = op1; op1 += op2; if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) { - helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); + helper_excp(EXCP_GEN_ARITH, EXCP_ARITH_OVERFLOW); } return op1; } @@ -155,7 +171,7 @@ uint64_t helper_addlv (uint64_t op1, uint64_t op2) uint64_t tmp = op1; op1 = (uint32_t)(op1 + op2); if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) { - helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); + helper_excp(EXCP_GEN_ARITH, EXCP_ARITH_OVERFLOW); } return op1; } @@ -165,7 +181,7 @@ uint64_t helper_subqv (uint64_t op1, uint64_t op2) uint64_t tmp = op1; op1 -= op2; if (unlikely(((~tmp) ^ op1 ^ (-1ULL)) & ((~tmp) ^ op2) & (1ULL << 63))) { - helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); + helper_excp(EXCP_GEN_ARITH, EXCP_ARITH_OVERFLOW); } return op1; } @@ -175,7 +191,7 @@ uint64_t helper_sublv (uint64_t op1, uint64_t op2) uint64_t tmp = op1; op1 = (uint32_t)(op1 - op2); if (unlikely(((~tmp) ^ op1 ^ (-1UL)) & ((~tmp) ^ op2) & (1UL << 31))) { - helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); + helper_excp(EXCP_GEN_ARITH, EXCP_ARITH_OVERFLOW); } return op1; } @@ -185,7 +201,7 @@ uint64_t helper_mullv (uint64_t op1, uint64_t op2) int64_t res = (int64_t)op1 * (int64_t)op2; if (unlikely((int32_t)res != res)) { - helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); + helper_excp(EXCP_GEN_ARITH, EXCP_ARITH_OVERFLOW); } return (int64_t)((int32_t)res); } @@ -197,7 +213,7 @@ uint64_t helper_mulqv (uint64_t op1, uint64_t op2) muls64(&tl, &th, op1, op2); /* If th != 0 && th != -1, then we had an overflow */ if (unlikely((th + 1) > 1)) { - helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); + helper_excp(EXCP_GEN_ARITH, EXCP_ARITH_OVERFLOW); } return tl; } @@ -390,7 +406,7 @@ static always_inline float32 f_to_float32 (uint64_t a) if (unlikely(!exp && mant_sig)) { /* Reserved operands / Dirty zero */ - helper_excp(EXCP_OPCDEC, 0); + helper_excp(EXCP_GEN_OPCDEC, 0); } if (exp < 3) { @@ -517,7 +533,7 @@ static always_inline float64 g_to_float64 (uint64_t a) if (!exp && mant_sig) { /* Reserved operands / Dirty zero */ - helper_excp(EXCP_OPCDEC, 0); + helper_excp(EXCP_GEN_OPCDEC, 0); } if (exp < 3) { @@ -972,7 +988,7 @@ static 
always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v) r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29; if (v && (int64_t)((int32_t)r) != (int64_t)r) { - helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); + helper_excp(EXCP_GEN_ARITH, EXCP_ARITH_OVERFLOW); } if (s) { /* TODO */ @@ -999,96 +1015,133 @@ uint64_t helper_cvtqlsv (uint64_t a) #if !defined (CONFIG_USER_ONLY) void helper_hw_rei (void) { - env->pc = env->ipr[IPR_EXC_ADDR] & ~3; - env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1; +#if 0 + /* FIXME: For 21064/21164 only ? */ + env->pc = env->any.ipr[IPR_EXC_ADDR] & ~3; + env->any.ipr[IPR_EXC_ADDR] = env->any.ipr[IPR_EXC_ADDR] & 1; /* XXX: re-enable interrupts and memory mapping */ +#else + cpu_abort(env, "hw_rei not implemented\n"); +#endif } void helper_hw_ret (uint64_t a) { - env->pc = a & ~3; - env->ipr[IPR_EXC_ADDR] = a & 1; - /* XXX: re-enable interrupts and memory mapping */ + switch (env->pal_emul) { + case PAL_21264: + if (!(a & 1) && env->a21264.isum) { +#if 0 + qemu_log("pal mode ret interrupt ier=%016llx, isum=%016llx ir=%x\n", + env->a21264.ier, env->a21264.isum, + env->interrupt_request); +#endif + /* Very fast interrupt delivery! */ + env->exc_addr = a; + env->pc = env->pal_base + EXCP_21264_INTERRUPT; + env->interrupt_request &= ~CPU_INTERRUPT_HARD; + break; + } + env->pc = a & ~3; + env->pal_mode = a & 1; + if (!env->pal_mode) + env->mmu_code_index = env->mmu_data_index; + if (env->a21264.sde1 && !(a & 1)) + swap_shadow_21264(env); + break; + case PAL_NONE: + cpu_abort(env, "hw_ret: not supported by pal emulation\n"); + } } uint64_t helper_mfpr (int iprn, uint64_t val) { - uint64_t tmp; - - if (cpu_alpha_mfpr(env, iprn, &tmp) == 0) - val = tmp; - + switch (env->pal_emul) { + case PAL_21264: + return cpu_alpha_mfpr_21264(env, iprn); + break; + case PAL_NONE: + cpu_abort(env, "hw_mfpr: not supported by pal emulation\n"); + } return val; } void helper_mtpr (int iprn, uint64_t val) { - cpu_alpha_mtpr(env, iprn, val, NULL); -} - -void helper_set_alt_mode (void) -{ - env->saved_mode = env->ps & 0xC; - env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC); -} - -void helper_restore_mode (void) -{ - env->ps = (env->ps & ~0xC) | env->saved_mode; + switch (env->pal_emul) { + case PAL_21264: + cpu_alpha_mtpr_21264(env, iprn, val); + return; + case PAL_NONE: + cpu_abort(env, "hw_mtpr: not supported by pal emulation\n"); + } } - #endif /*****************************************************************************/ /* Softmmu support */ #if !defined (CONFIG_USER_ONLY) -/* XXX: the two following helpers are pure hacks. - * Hopefully, we emulate the PALcode, then we should never see - * HW_LD / HW_ST instructions. - */ -uint64_t helper_ld_virt_to_phys (uint64_t virtaddr) +static uint64_t hw_ld_virt2phys (uint64_t virtaddr, uint32_t v2p_flags) +{ + int mmu_idx; + struct alpha_pte pte; + + mmu_idx = v2p_flags & ALPHA_HW_MMUIDX_MASK; + pte = cpu_alpha_mmu_v2p_21264(env, virtaddr, 0); + if (!(pte.fl & ALPHA_PTE_V)) { + if (v2p_flags & ALPHA_HW_V) { + /* Virtual pte access. */ + env->exception_index = env->a21264.iva_48 ? 
+ EXCP_21264_DTBM_DOUBLE_4 : EXCP_21264_DTBM_DOUBLE_3; + } else + env->exception_index = EXCP_21264_DTBM_SINGLE; + cpu_alpha_mmu_dfault_21264(env, virtaddr, 0x03); + cpu_loop_exit(); + } + if ((v2p_flags & ALPHA_HW_W) + && (!((pte.fl >> mmu_idx) & ALPHA_PTE_KRE) + || (pte.fl & ALPHA_PTE_FOR))) { + env->exception_index = EXCP_21264_DFAULT; + cpu_alpha_mmu_dfault_21264(env, virtaddr, 0x03); + cpu_loop_exit(); + } + return (((uint64_t)pte.pa) << 13) | (virtaddr & ~TARGET_PAGE_MASK); +} + +uint64_t helper_21264_hw_ldq (uint64_t virtaddr, uint32_t v2p_flags) { uint64_t tlb_addr, physaddr; int index, mmu_idx; - void *retaddr; - mmu_idx = cpu_mmu_index_data(env); + mmu_idx = v2p_flags & ALPHA_HW_MMUIDX_MASK; index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: tlb_addr = env->tlb_table[mmu_idx][index].addr_read; if ((virtaddr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend; + return ldq_raw((uint8_t *)physaddr); } else { - /* the page is not in the TLB : fill it */ - retaddr = GETPC(); - tlb_fill(virtaddr, 0, mmu_idx, retaddr); - goto redo; + physaddr = hw_ld_virt2phys(virtaddr, v2p_flags); + return ldq_phys(physaddr); } - return physaddr; } -uint64_t helper_st_virt_to_phys (uint64_t virtaddr) +uint64_t helper_21264_hw_ldl (uint64_t virtaddr, uint32_t v2p_flags) { uint64_t tlb_addr, physaddr; int index, mmu_idx; - void *retaddr; - mmu_idx = cpu_mmu_index_data(env); + mmu_idx = v2p_flags & ALPHA_HW_MMUIDX_MASK; index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + tlb_addr = env->tlb_table[mmu_idx][index].addr_read; if ((virtaddr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend; + return ldl_raw((uint8_t *)physaddr); } else { - /* the page is not in the TLB : fill it */ - retaddr = GETPC(); - tlb_fill(virtaddr, 1, mmu_idx, retaddr); - goto redo; + physaddr = hw_ld_virt2phys(virtaddr, v2p_flags); + return ldl_phys(physaddr); } - return physaddr; } uint64_t helper_ldl_phys(uint64_t addr) @@ -1185,31 +1238,29 @@ uint64_t helper_stq_c_phys(uint64_t val, uint64_t addr) NULL, it means that the function was called in C code (i.e. not from generated code or from helper.c) */ /* XXX: fix it to restore all registers */ -void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr) +void tlb_fill (target_ulong addr, int rwx, int mmu_idx, void *retaddr) { - TranslationBlock *tb; CPUState *saved_env; - unsigned long pc; int ret; /* XXX: hack to restore env in all cases, even if not called from generated code */ saved_env = env; env = cpu_single_env; - ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1); - if (!likely(ret == 0)) { - if (likely(retaddr)) { - /* now we have a real cpu fault */ - pc = (unsigned long)retaddr; - tb = tb_find_pc(pc); - if (likely(tb)) { - /* the PC is inside the translated code. 
It means that we have - a virtual CPU fault */ - cpu_restore_state(tb, env, pc, NULL); - } + + if (rwx == 2 && mmu_idx == MMU_PAL_IDX) + cpu_alpha_mmu_fault_pal(env, addr); + else { + switch (env->pal_emul) { + case PAL_21264: + ret = cpu_alpha_mmu_fault_21264(env, addr, rwx, mmu_idx, retaddr); + if (!likely(ret == 0)) + /* Exception index and error code are already set */ + cpu_loop_exit(); + break; + case PAL_NONE: + cpu_abort(env, "tlb_fill: not supported by pal emulation\n"); } - /* Exception index and error code are already set */ - cpu_loop_exit(); } env = saved_env; } diff --git a/target-alpha/translate.c b/target-alpha/translate.c index 67410b3..bcee5d7 100644 --- a/target-alpha/translate.c +++ b/target-alpha/translate.c @@ -47,6 +47,7 @@ typedef struct DisasContext DisasContext; struct DisasContext { uint64_t pc; + CPUAlphaState *env; int mem_idx; #if !defined (CONFIG_USER_ONLY) int pal_mode; @@ -106,7 +107,7 @@ static void alpha_translate_init(void) static inline int get_mxcr_iprn(DisasContext *ctx, uint32_t insn) { - switch (ctx->implver) { + switch (ctx->env->implver) { case IMPLVER_2106x: return insn & 0xff; case IMPLVER_21164: @@ -118,6 +119,43 @@ static inline int get_mxcr_iprn(DisasContext *ctx, uint32_t insn) } } +#if !defined (CONFIG_USER_ONLY) +static inline void gen_hw_ld(DisasContext *ctx, int ra, TCGv addr, + int hw_flags) +{ + TCGv_i32 flags = tcg_temp_new_i32(); + + tcg_gen_movi_i64(cpu_pc, ctx->pc - 4); + tcg_gen_movi_i32(flags, ctx->mem_idx | hw_flags); + switch (ctx->env->pal_emul) { + case PAL_21264: + if (hw_flags & ALPHA_HW_L) + gen_helper_21264_hw_ldl(cpu_ir[ra], addr, flags); + else if (hw_flags & ALPHA_HW_Q) + gen_helper_21264_hw_ldq(cpu_ir[ra], addr, flags); + else + abort(); + break; + default: + cpu_abort(ctx->env, "gen_hw_ld: pal emul %d not supported\n", + ctx->env->pal_emul); + } + tcg_temp_free_i32(flags); +} + +static inline void gen_hw_st(DisasContext *ctx, int ra, TCGv addr, + int hw_flags) +{ + TCGv_i32 flags = tcg_temp_new_i32(); + + tcg_gen_movi_i64(cpu_pc, ctx->pc - 4); + tcg_gen_movi_i32(flags, ctx->mem_idx | hw_flags); + cpu_abort(ctx->env, "gen_hw_st: pal emul %d not supported\n", + ctx->env->pal_emul); + tcg_temp_free_i32(flags); +} +#endif /* CONFIG_USER_ONLY */ + static always_inline void gen_excp (DisasContext *ctx, int exception, int error_code) { @@ -133,7 +171,7 @@ static always_inline void gen_excp (DisasContext *ctx, static always_inline void gen_invalid (DisasContext *ctx) { - gen_excp(ctx, EXCP_OPCDEC, 0); + gen_excp(ctx, EXCP_GEN_OPCDEC, 0); } static always_inline void gen_qemu_ldf (TCGv t0, TCGv t1, int flags) @@ -1806,6 +1844,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); else tcg_gen_movi_i64(addr, disp12); + /* Data alignment check is disabled for hw_ld */ + tcg_gen_andi_i64(addr, addr, ((insn >> 12) & 1) ? 
~0x7 : ~0x3); switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access (hw_ldl/p) */ @@ -1825,11 +1865,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x4: /* Longword virtual PTE fetch (hw_ldl/v) */ - tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_V | ALPHA_HW_L); break; case 0x5: /* Quadword virtual PTE fetch (hw_ldq/v) */ - tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_V | ALPHA_HW_Q); break; case 0x6: /* Incpu_ir[ra]id */ @@ -1839,51 +1879,37 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) goto invalid_opc; case 0x8: /* Longword virtual access (hw_ldl) */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldl_phys(cpu_ir[ra], addr); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_L); break; case 0x9: /* Quadword virtual access (hw_ldq) */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldq_phys(cpu_ir[ra], addr); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_Q); break; case 0xA: /* Longword virtual access with protection check (hw_ldl/w) */ - tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_W | ALPHA_HW_L); break; case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */ - tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_W | ALPHA_HW_Q); break; case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a)*/ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldl_phys(cpu_ir[ra], addr); - gen_helper_restore_mode(); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_A | ALPHA_HW_L); break; case 0xD: /* Quadword virtual access with alt access mode (hw_ldq/a) */ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_ldq_phys(cpu_ir[ra], addr); - gen_helper_restore_mode(); + gen_hw_ld(ctx, ra, addr, ALPHA_HW_A | ALPHA_HW_Q); break; case 0xE: /* Longword virtual access with alternate access mode and - * protection checks (hw_ldl/wa) - */ - gen_helper_set_alt_mode(); - gen_helper_ldl_data(cpu_ir[ra], addr); - gen_helper_restore_mode(); + * protection checks (hw_ldl/wa) */ + gen_hw_ld(ctx, ra, addr, ALPHA_HW_A | ALPHA_HW_W | ALPHA_HW_L); break; case 0xF: /* Quadword virtual access with alternate access mode and - * protection checks (hw_ldq/wa) - */ - gen_helper_set_alt_mode(); - gen_helper_ldq_data(cpu_ir[ra], addr); - gen_helper_restore_mode(); + * protection checks (hw_ldq/wa) */ + gen_hw_ld(ctx, ra, addr, ALPHA_HW_A | ALPHA_HW_W | ALPHA_HW_Q); break; } tcg_temp_free(addr); @@ -2103,7 +2129,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) gen_helper_hw_rei(); } else { TCGv tmp; - + if (rb != 31) { tmp = tcg_temp_new(); tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51)); @@ -2135,6 +2161,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) val = tcg_temp_new(); tcg_gen_movi_i64(val, 0); } + LOG_DISAS("pal1f acc=%d\n", (insn >> 12) & 0xf); switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access */ @@ -2154,13 +2181,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) break; case 0x4: /* Longword virtual access */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stl_phys(val, addr); + gen_hw_st(ctx, addr, val, ALPHA_HW_L); break; case 0x5: /* Quadword virtual access */ - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stq_phys(val, addr); + gen_hw_st(ctx, addr, val, ALPHA_HW_Q); break; case 0x6: /* Invalid */ @@ -2182,17 
+2207,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) goto invalid_opc; case 0xC: /* Longword virtual access with alternate access mode */ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stl_phys(val, addr); - gen_helper_restore_mode(); + gen_hw_st(ctx, addr, val, ALPHA_HW_A | ALPHA_HW_L); break; case 0xD: /* Quadword virtual access with alternate access mode */ - gen_helper_set_alt_mode(); - gen_helper_st_virt_to_phys(addr, addr); - gen_helper_stl_phys(val, addr); - gen_helper_restore_mode(); + gen_hw_st(ctx, addr, val, ALPHA_HW_A | ALPHA_HW_Q); break; case 0xE: /* Invalid */ @@ -2367,12 +2386,12 @@ static always_inline void gen_intermediate_code_internal (CPUState *env, gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; ctx.pc = pc_start; ctx.amask = env->amask; - ctx.implver = env->implver; + ctx.env = env; #if defined (CONFIG_USER_ONLY) ctx.mem_idx = 0; #else - ctx.mem_idx = ((env->ps >> 3) & 3); - ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1; + ctx.mem_idx = env->mmu_data_index; + ctx.pal_mode = env->pal_mode; #endif num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; @@ -2428,7 +2447,7 @@ static always_inline void gen_intermediate_code_internal (CPUState *env, if (env->singlestep_enabled) { gen_excp(&ctx, EXCP_DEBUG, 0); break; - } + } #if defined (DO_SINGLE_STEP) break; @@ -2501,7 +2520,6 @@ void alpha_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) CPUAlphaState * cpu_alpha_init (const char *cpu_model) { CPUAlphaState *env; - uint64_t hwpcb; const struct alpha_def *cpu = alpha_defs; if (cpu_model != NULL) { @@ -2520,31 +2538,24 @@ CPUAlphaState * cpu_alpha_init (const char *cpu_model) alpha_translate_init(); tlb_flush(env, 1); + env->pal_emul = PAL_NONE; env->implver = cpu->implver; env->amask = cpu->amask; - env->ps = 0x1F00; -#if defined (CONFIG_USER_ONLY) - env->ps |= 1 << 3; -#endif - /* Initialize IPR */ - hwpcb = env->ipr[IPR_PCBB]; - env->ipr[IPR_ASN] = 0; - env->ipr[IPR_ASTEN] = 0; - env->ipr[IPR_ASTSR] = 0; - env->ipr[IPR_DATFX] = 0; - /* XXX: fix this */ - // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8); - // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0); - // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16); - // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24); - env->ipr[IPR_FEN] = 0; - env->ipr[IPR_IPL] = 31; - env->ipr[IPR_MCES] = 0; - env->ipr[IPR_PERFMON] = 0; /* Implementation specific */ - // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32); - env->ipr[IPR_SISR] = 0; - env->ipr[IPR_VIRBND] = -1ULL; +#ifndef CONFIG_USER_ONLY + env->pal_mode = 1; + env->mmu_code_index = 4; + env->mmu_data_index = 0; + + switch (env->implver) { + case IMPLVER_21264: + init_cpu_21264(env); + break; + default: + abort(); + break; + } +#endif return env; } -- 1.6.2