--- linux-2.6.9/include/asm-ia64/bug.h.icc 2004-10-18 14:55:36.000000000 -0700 +++ linux-2.6.9/include/asm-ia64/bug.h 2005-12-09 12:52:57.000000000 -0800 @@ -1,11 +1,8 @@ #ifndef _ASM_IA64_BUG_H #define _ASM_IA64_BUG_H -#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1) -# define ia64_abort() __builtin_trap() -#else -# define ia64_abort() (*(volatile int *) 0 = 0) -#endif +#include <asm/intrinsics.h> + #define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0) /* should this BUG should be made generic? */ --- linux-2.6.9/include/asm-ia64/gcc_intrin.h.icc 2004-10-18 14:54:07.000000000 -0700 +++ linux-2.6.9/include/asm-ia64/gcc_intrin.h 2005-12-09 14:48:06.000000000 -0800 @@ -388,6 +388,44 @@ register unsigned long ia64_r13 asm ("r1 # define ia64_dv_serialize_instruction() #endif +#define ia64_st1_rel(dst, val) \ + asm volatile ("st1.rel [%0]=%1":: "r"(dst), "r"(val) : "memory") +#define ia64_st2_rel(dst, val) \ + asm volatile ("st2.rel [%0]=%1":: "r"(dst), "r"(val) : "memory") +#define ia64_st4_rel(dst, val) \ + asm volatile ("st4.rel [%0]=%1":: "r"(dst), "r"(val) : "memory") +#define ia64_st8_rel(dst, val) \ + asm volatile ("st8.rel [%0]=%1":: "r"(dst), "r"(val) : "memory") + +#define ia64_ld1_acq(src) \ +({ \ + __u8 val; \ + asm volatile ("ld1.acq %0=[%1]" \ + : "=r"(val) : "r"(src) : "memory"); \ + val; \ +}) +#define ia64_ld2_acq(src) \ +({ \ + __u16 val; \ + asm volatile ("ld2.acq %0=[%1]" \ + : "=r"(val) : "r"(src) : "memory"); \ + val; \ +}) +#define ia64_ld4_acq(src) \ +({ \ + __u32 val; \ + asm volatile ("ld4.acq %0=[%1]" \ + : "=r"(val) : "r"(src) : "memory"); \ + val; \ +}) +#define ia64_ld8_acq(src) \ +({ \ + __u64 val; \ + asm volatile ("ld8.acq %0=[%1]" \ + : "=r"(val) : "r"(src) : "memory"); \ + val; \ +}) + #define ia64_nop(x) asm volatile ("nop %0"::"i"(x)); #define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory") @@ -594,4 +632,23 @@ do { \ :: "r"((x)) : "p6", "p7", "memory"); \ } while (0) + +#define 
ia64_getpsr() \ +({ \ + __u64 psr; \ + asm volatile ("mov %0=psr" : "=r"(psr)); \ + psr; \ +}) + +#define ia64_setpsrlow(l) \ +({ \ + asm volatile ("mov psr.l=%0" :: "r"(l)); \ +}) + +#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1) +# define ia64_abort() __builtin_trap() +#else +# define ia64_abort() (*(volatile int *) 0 = 0) +#endif + #endif /* _ASM_IA64_GCC_INTRIN_H */ --- linux-2.6.9/include/asm-ia64/intel_intrin.h.icc 2005-12-09 12:52:57.000000000 -0800 +++ linux-2.6.9/include/asm-ia64/intel_intrin.h 2005-12-09 12:52:57.000000000 -0800 @@ -1,4 +1,159 @@ #ifndef _ASM_IA64_INTEL_INTRIN_H #define _ASM_IA64_INTEL_INTRIN_H +/* + * Intel Compiler Intrinsics + * + * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com> + * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com> + * + */ +#include <ia64intrin.h> + +#define ia64_barrier() __memory_barrier() + +#define ia64_stop() /* Nothing: As of now stop bit is generated for each + * intrinsic + */ + +#define ia64_getreg __getReg +#define ia64_setreg __setReg + +#define ia64_hint(x) + +#define ia64_mux1_brcst 0 +#define ia64_mux1_mix 8 +#define ia64_mux1_shuf 9 +#define ia64_mux1_alt 10 +#define ia64_mux1_rev 11 + +#define ia64_mux1(x,v) \ + _m_to_int64 (_m64_mux1 (_m_from_int64 (x), (v))) +#define ia64_popcnt _m64_popcnt +#define ia64_getf_exp __getf_exp +#define ia64_shrp _m64_shrp + +#define ia64_tpa __tpa +#define ia64_invala __invala +#define ia64_invala_gr __invala_gr +#define ia64_invala_fr __invala_fr +#define ia64_nop __nop +#define ia64_sum __sum +#define ia64_ssm __ssm +#define ia64_rum __rum +#define ia64_rsm __rsm +#define ia64_fc __fc + +#define ia64_ldfs __ldfs +#define ia64_ldfd __ldfd +#define ia64_ldfe __ldfe +#define ia64_ldf8 __ldf8 +#define ia64_ldf_fill __ldf_fill + +#define ia64_stfs __stfs +#define ia64_stfd __stfd +#define ia64_stfe __stfe +#define ia64_stf8 __stf8 +#define ia64_stf_spill __stf_spill + +#define ia64_mf __mf +#define ia64_mfa __mfa + +#define ia64_fetchadd4_acq __fetchadd4_acq +#define ia64_fetchadd4_rel 
__fetchadd4_rel +#define ia64_fetchadd8_acq __fetchadd8_acq +#define ia64_fetchadd8_rel __fetchadd8_rel + +#define ia64_xchg1 _InterlockedExchange8 +#define ia64_xchg2 _InterlockedExchange16 +#define ia64_xchg4 _InterlockedExchange +#define ia64_xchg8 _InterlockedExchange64 + +#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel +#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq +#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel +#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq +#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel +#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq +#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel +#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq + +#define __ia64_set_dbr(index, val) \ + __setIndReg(_IA64_REG_INDR_DBR, index, val) +#define ia64_set_ibr(index, val) \ + __setIndReg(_IA64_REG_INDR_IBR, index, val) +#define ia64_set_pkr(index, val) \ + __setIndReg(_IA64_REG_INDR_PKR, index, val) +#define ia64_set_pmc(index, val) \ + __setIndReg(_IA64_REG_INDR_PMC, index, val) +#define ia64_set_pmd(index, val) \ + __setIndReg(_IA64_REG_INDR_PMD, index, val) +#define ia64_set_rr(index, val) \ + __setIndReg(_IA64_REG_INDR_RR, index, val) + +#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index) +#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) +#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) +#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) +#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) +#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) +#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) + +#define ia64_srlz_d __dsrlz +#define ia64_srlz_i __isrlz + +#define ia64_dv_serialize_data() +#define ia64_dv_serialize_instruction() + +#define ia64_st1_rel __st1_rel +#define ia64_st2_rel __st2_rel +#define ia64_st4_rel __st4_rel +#define ia64_st8_rel 
__st8_rel + +#define ia64_ld1_acq __ld1_acq +#define ia64_ld2_acq __ld2_acq +#define ia64_ld4_acq __ld4_acq +#define ia64_ld8_acq __ld8_acq + +#define ia64_sync_i __synci +#define ia64_thash __thash +#define ia64_ttag __ttag +#define ia64_itcd __itcd +#define ia64_itci __itci +#define ia64_itrd __itrd +#define ia64_itri __itri +#define ia64_ptce __ptce +#define ia64_ptcl __ptcl +#define ia64_ptcg __ptcg +#define ia64_ptcga __ptcga +#define ia64_ptri __ptri +#define ia64_ptrd __ptrd +#define ia64_dep_mi _m64_dep_mi + +/* Values for lfhint in __lfetch and __lfetch_fault */ + +#define ia64_lfhint_none 0 +#define ia64_lfhint_nt1 1 +#define ia64_lfhint_nt2 2 +#define ia64_lfhint_nta 3 + +#define ia64_lfetch __lfetch +#define ia64_lfetch_excl __lfetch_excl +#define ia64_lfetch_fault __lfetch_fault +#define ia64_lfetch_fault_excl __lfetch_fault_excl + +#define ia64_intrin_local_irq_restore(x) \ +do { \ + if ((x) != 0) { \ + ia64_ssm(IA64_PSR_I); \ + ia64_srlz_d(); \ + } else { \ + ia64_rsm(IA64_PSR_I); \ + } \ +} while (0) + +#define ia64_getpsr() __GetPSR () +#define ia64_setpsrlow(l) __SetPSRLow (l) + +#define ia64_abort() __break (0) #endif /* _ASM_IA64_INTEL_INTRIN_H */ --- linux-2.6.9/include/asm-ia64/sn/rw_mmr.h.icc 2005-12-09 12:52:52.000000000 -0800 +++ linux-2.6.9/include/asm-ia64/sn/rw_mmr.h 2005-12-09 12:52:57.000000000 -0800 @@ -25,17 +25,13 @@ extern inline long pio_phys_read_mmr(volatile long *mmr) { - long val; - asm volatile - ("mov r2=psr;;" - "rsm psr.i | psr.dt;;" - "srlz.i;;" - "ld8.acq %0=[%1];;" - "mov psr.l=r2;;" - "srlz.i;;" - : "=r"(val) - : "r"(mmr) - : "r2"); + long val, psr_saved; + psr_saved = ia64_getpsr (); + ia64_rsm (IA64_PSR_I | IA64_PSR_DT); + ia64_srlz_i (); + val = ia64_ld8_acq (mmr); + ia64_setpsrlow (psr_saved); + ia64_srlz_i (); return val; } @@ -44,31 +40,27 @@ pio_phys_read_mmr(volatile long *mmr) extern inline void pio_phys_write_mmr(volatile long *mmr, long val) { - asm volatile - ("mov r2=psr;;" - "rsm psr.i | psr.dt;;" - 
"srlz.i;;" - "st8.rel [%0]=%1;;" - "mov psr.l=r2;;" - "srlz.i;;" - :: "r"(mmr), "r"(val) - : "r2", "memory"); -} + long psr_saved; + psr_saved = ia64_getpsr (); + ia64_rsm (IA64_PSR_I | IA64_PSR_DT); + ia64_srlz_i (); + ia64_st8_rel (mmr, val); + ia64_setpsrlow (psr_saved); + ia64_srlz_i (); +} extern inline void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2) { - asm volatile - ("mov r2=psr;;" - "rsm psr.i | psr.dt | psr.ic;;" - "cmp.ne p9,p0=%2,r0;" - "srlz.i;;" - "st8.rel [%0]=%1;" - "(p9) st8.rel [%2]=%3;;" - "mov psr.l=r2;;" - "srlz.i;;" - :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2) - : "p9", "r2", "memory"); -} + long psr_saved; + psr_saved = ia64_getpsr (); + ia64_rsm (IA64_PSR_I | IA64_PSR_DT | IA64_PSR_IC); + ia64_srlz_i (); + ia64_st8_rel (mmr1, val1); + if (mmr2 != NULL) + ia64_st8_rel (mmr2, val2); + ia64_setpsrlow (psr_saved); + ia64_srlz_i (); +} #endif /* _ASM_IA64_SN_RW_MMR_H */