diff -ru l2567-ref/arch/ia64/kernel/efi_stub.S l2567-reloc/arch/ia64/kernel/efi_stub.S --- l2567-ref/arch/ia64/kernel/efi_stub.S Mon Apr 7 10:30:33 2003 +++ l2567-reloc/arch/ia64/kernel/efi_stub.S Thu May 8 13:51:25 2003 @@ -62,7 +62,7 @@ mov b6=r2 ;; andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared - br.call.sptk.many rp=ia64_switch_mode + br.call.sptk.many rp=ia64_switch_mode_phys .ret0: mov out4=in5 mov out0=in1 mov out1=in2 @@ -73,7 +73,7 @@ br.call.sptk.many rp=b6 // call the EFI function .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 - br.call.sptk.many rp=ia64_switch_mode // return to virtual mode + br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode .ret2: mov ar.rsc=loc4 // restore RSE configuration mov ar.pfs=loc1 mov rp=loc0 diff -ru l2567-ref/arch/ia64/kernel/entry.S l2567-reloc/arch/ia64/kernel/entry.S --- l2567-ref/arch/ia64/kernel/entry.S Thu May 1 10:19:28 2003 +++ l2567-reloc/arch/ia64/kernel/entry.S Fri May 9 15:21:15 2003 @@ -178,15 +178,12 @@ ;; st8 [r22]=sp // save kernel stack pointer of old task shr.u r26=r20,IA64_GRANULE_SHIFT - shr.u r17=r20,KERNEL_TR_PAGE_SHIFT - ;; - cmp.ne p6,p7=KERNEL_TR_PAGE_NUM,r17 adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0 ;; /* * If we've already mapped this task's page, we can skip doing it again. 
*/ -(p6) cmp.eq p7,p6=r26,r27 + cmp.eq p7,p6=r26,r27 (p6) br.cond.dpnt .map ;; .done: diff -ru l2567-ref/arch/ia64/kernel/head.S l2567-reloc/arch/ia64/kernel/head.S --- l2567-ref/arch/ia64/kernel/head.S Thu May 1 10:19:28 2003 +++ l2567-reloc/arch/ia64/kernel/head.S Tue May 13 11:05:51 2003 @@ -60,22 +60,42 @@ mov r4=r0 .body - /* - * Initialize the region register for region 7 and install a translation register - * that maps the kernel's text and data: - */ rsm psr.i | psr.ic - mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET) << 8) | (IA64_GRANULE_SHIFT << 2)) ;; srlz.i + ;; + /* + * Initialize kernel region registers: + * rr[5]: VHPT enabled, page size = PAGE_SHIFT + * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT + * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT + */ + mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1) + movl r17=(5<<61) + mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) + movl r19=(6<<61) + mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) + movl r21=(7<<61) + ;; + mov rr[r17]=r16 + mov rr[r19]=r18 + mov rr[r21]=r20 + ;; + /* + * Now pin mappings into the TLB for kernel text and data + */ mov r18=KERNEL_TR_PAGE_SHIFT<<2 movl r17=KERNEL_START ;; - mov rr[r17]=r16 mov cr.itir=r18 mov cr.ifa=r17 mov r16=IA64_TR_KERNEL - movl r18=((1 << KERNEL_TR_PAGE_SHIFT) | PAGE_KERNEL) + mov r3=ip + movl r18=PAGE_KERNEL + ;; + dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT + ;; + or r18=r2,r18 ;; srlz.i ;; @@ -113,16 +133,6 @@ mov ar.fpsr=r2 ;; -#ifdef CONFIG_IA64_EARLY_PRINTK - mov r3=(6<<8) | (IA64_GRANULE_SHIFT<<2) - movl r2=6<<61 - ;; - mov rr[r2]=r3 - ;; - srlz.i - ;; -#endif - #define isAP p2 // are we an Application Processor? #define isBP p3 // are we the Bootstrap Processor?
@@ -143,12 +153,36 @@ movl r2=init_thread_union cmp.eq isBP,isAP=r0,r0 #endif - mov r16=KERNEL_TR_PAGE_NUM ;; + tpa r3=r2 // r3 == phys addr of task struct + // load mapping for stack (virtaddr in r2, physaddr in r3) + rsm psr.ic + movl r17=PAGE_KERNEL + ;; + srlz.d + dep r18=0,r3,0,12 + ;; + or r18=r17,r18 + dep r2=-1,r3,61,3 // IMVA of task + ;; + mov r17=rr[r2] + shr.u r16=r3,IA64_GRANULE_SHIFT + ;; + dep r17=0,r17,8,24 + ;; + mov cr.itir=r17 + mov cr.ifa=r2 + + mov r19=IA64_TR_CURRENT_STACK + ;; + itr.d dtr[r19]=r18 + ;; + ssm psr.ic + srlz.d + ;; // load the "current" pointer (r13) and ar.k6 with the current task mov IA64_KR(CURRENT)=r2 // virtual address - // initialize k4 to a safe value (64-128MB is mapped by TR_KERNEL) mov IA64_KR(CURRENT_STACK)=r16 mov r13=r2 /* @@ -665,14 +699,14 @@ END(__ia64_init_fpu) /* - * Switch execution mode from virtual to physical or vice versa. + * Switch execution mode from virtual to physical * * Inputs: * r16 = new psr to establish * * Note: RSE must already be in enforced lazy mode */ -GLOBAL_ENTRY(ia64_switch_mode) +GLOBAL_ENTRY(ia64_switch_mode_phys) { alloc r2=ar.pfs,0,0,0,0 rsm psr.i | psr.ic // disable interrupts and interrupt collection @@ -682,35 +716,86 @@ { flushrs // must be first insn in group srlz.i - shr.u r19=r15,61 // r19 <- top 3 bits of current IP } ;; mov cr.ipsr=r16 // set new PSR - add r3=1f-ia64_switch_mode,r15 - xor r15=0x7,r19 // flip the region bits + add r3=1f-ia64_switch_mode_phys,r15 mov r17=ar.bsp mov r14=rp // get return address into a general register + ;; - // switch RSE backing store: + // going to physical mode, use tpa to translate virt->phys + tpa r17=r17 + tpa r3=r3 + tpa sp=sp + tpa r14=r14 ;; - dep r17=r15,r17,61,3 // make ar.bsp physical or virtual + mov r18=ar.rnat // save ar.rnat - ;; mov ar.bspstore=r17 // this steps on ar.rnat - dep r3=r15,r3,61,3 // make rfi return address physical or virtual + mov cr.iip=r3 + mov cr.ifs=r0 ;; + mov ar.rnat=r18 // restore ar.rnat + rfi // must be 
last insn in group + ;; +1: mov rp=r14 + br.ret.sptk.many rp +END(ia64_switch_mode_phys) + +/* + * Switch execution mode from physical to virtual + * + * Inputs: + * r16 = new psr to establish + * + * Note: RSE must already be in enforced lazy mode + */ +GLOBAL_ENTRY(ia64_switch_mode_virt) + { + alloc r2=ar.pfs,0,0,0,0 + rsm psr.i | psr.ic // disable interrupts and interrupt collection + mov r15=ip + } + ;; + { + flushrs // must be first insn in group + srlz.i + } + ;; + mov cr.ipsr=r16 // set new PSR + add r3=1f-ia64_switch_mode_virt,r15 + + mov r17=ar.bsp + mov r14=rp // get return address into a general register + ;; + + // going to virtual + // - for code addresses, set upper bits of addr to KERNEL_START + // - for stack addresses, set upper 3 bits to 0xe.... Dont change any of the + // lower bits since we want it to stay identity mapped + movl r18=KERNEL_START + dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT + dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT + dep r17=-1,r17,61,3 + dep sp=-1,sp,61,3 + ;; + or r3=r3,r18 + or r14=r14,r18 + ;; + + mov r18=ar.rnat // save ar.rnat + mov ar.bspstore=r17 // this steps on ar.rnat mov cr.iip=r3 mov cr.ifs=r0 - dep sp=r15,sp,61,3 // make stack pointer physical or virtual ;; mov ar.rnat=r18 // restore ar.rnat - dep r14=r15,r14,61,3 // make function return address physical or virtual rfi // must be last insn in group ;; 1: mov rp=r14 br.ret.sptk.many rp -END(ia64_switch_mode) +END(ia64_switch_mode_virt) #ifdef CONFIG_IA64_BRL_EMU diff -ru l2567-ref/arch/ia64/kernel/ia64_ksyms.c l2567-reloc/arch/ia64/kernel/ia64_ksyms.c --- l2567-ref/arch/ia64/kernel/ia64_ksyms.c Thu May 1 10:19:28 2003 +++ l2567-reloc/arch/ia64/kernel/ia64_ksyms.c Fri May 9 15:21:25 2003 @@ -146,6 +146,7 @@ EXPORT_SYMBOL(ia64_mv); #endif EXPORT_SYMBOL(machvec_noop); +EXPORT_SYMBOL(zero_page_memmap_ptr); #ifdef CONFIG_PERFMON #include EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem); diff -ru l2567-ref/arch/ia64/kernel/ivt.S 
l2567-reloc/arch/ia64/kernel/ivt.S --- l2567-ref/arch/ia64/kernel/ivt.S Mon Apr 7 10:30:43 2003 +++ l2567-reloc/arch/ia64/kernel/ivt.S Mon May 12 13:28:44 2003 @@ -122,8 +122,13 @@ shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address ;; (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place + .global ia64_ivt_patch1 +ia64_ivt_patch1: +{ .mlx // we patch this bundle to include physical address of swapper_pg_dir srlz.d // ensure "rsm psr.dt" has taken effect -(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir +(p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir +} + .pred.rel "mutex", p6, p7 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3 ;; @@ -415,8 +420,13 @@ shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address ;; (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place - srlz.d -(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir + .global ia64_ivt_patch2 +ia64_ivt_patch2: +{ .mlx // we patch this bundle to include physical address of swapper_pg_dir + srlz.d // ensure "rsm psr.dt" has taken effect +(p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir +} + .pred.rel "mutex", p6, p7 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3 ;; diff -ru l2567-ref/arch/ia64/kernel/mca.c l2567-reloc/arch/ia64/kernel/mca.c --- l2567-ref/arch/ia64/kernel/mca.c Thu May 1 10:19:28 2003 +++ l2567-reloc/arch/ia64/kernel/mca.c Fri May 9 15:56:51 2003 @@ -641,17 +641,17 @@ IA64_MCA_DEBUG("ia64_mca_init: registered mca rendezvous spinloop and wakeup mech.\n"); - ia64_mc_info.imi_mca_handler = __pa(mca_hldlr_ptr->fp); + ia64_mc_info.imi_mca_handler = __tpa(mca_hldlr_ptr->fp); /* * XXX - disable SAL checksum by setting size to 0; should be - * __pa(ia64_os_mca_dispatch_end) - __pa(ia64_os_mca_dispatch); + * __tpa(ia64_os_mca_dispatch_end) - __tpa(ia64_os_mca_dispatch); */ 
ia64_mc_info.imi_mca_handler_size = 0; /* Register the os mca handler with SAL */ if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, ia64_mc_info.imi_mca_handler, - mca_hldlr_ptr->gp, + __tpa(mca_hldlr_ptr->gp), ia64_mc_info.imi_mca_handler_size, 0, 0, 0))) { @@ -661,15 +661,15 @@ } IA64_MCA_DEBUG("ia64_mca_init: registered os mca handler with SAL at 0x%lx, gp = 0x%lx\n", - ia64_mc_info.imi_mca_handler, mca_hldlr_ptr->gp); + ia64_mc_info.imi_mca_handler, __tpa(mca_hldlr_ptr->gp)); /* * XXX - disable SAL checksum by setting size to 0, should be * IA64_INIT_HANDLER_SIZE */ - ia64_mc_info.imi_monarch_init_handler = __pa(mon_init_ptr->fp); + ia64_mc_info.imi_monarch_init_handler = __tpa(mon_init_ptr->fp); ia64_mc_info.imi_monarch_init_handler_size = 0; - ia64_mc_info.imi_slave_init_handler = __pa(slave_init_ptr->fp); + ia64_mc_info.imi_slave_init_handler = __tpa(slave_init_ptr->fp); ia64_mc_info.imi_slave_init_handler_size = 0; IA64_MCA_DEBUG("ia64_mca_init: os init handler at %lx\n", @@ -678,10 +678,10 @@ /* Register the os init handler with SAL */ if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, ia64_mc_info.imi_monarch_init_handler, - __pa(ia64_get_gp()), + __tpa(ia64_get_gp()), ia64_mc_info.imi_monarch_init_handler_size, ia64_mc_info.imi_slave_init_handler, - __pa(ia64_get_gp()), + __tpa(ia64_get_gp()), ia64_mc_info.imi_slave_init_handler_size))) { printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. 
" diff -ru l2567-ref/arch/ia64/kernel/pal.S l2567-reloc/arch/ia64/kernel/pal.S --- l2567-ref/arch/ia64/kernel/pal.S Mon Apr 7 10:32:57 2003 +++ l2567-reloc/arch/ia64/kernel/pal.S Fri May 9 15:24:27 2003 @@ -164,7 +164,7 @@ ;; mov loc4=ar.rsc // save RSE configuration dep.z loc2=loc2,0,61 // convert pal entry point to physical - dep.z r8=r8,0,61 // convert rp to physical + tpa r8=r8 // convert rp to physical ;; mov b7 = loc2 // install target to branch reg mov ar.rsc=0 // put RSE in enforced lazy, LE mode @@ -174,13 +174,13 @@ or loc3=loc3,r17 // add in psr the bits to set ;; andcm r16=loc3,r16 // removes bits to clear from psr - br.call.sptk.many rp=ia64_switch_mode + br.call.sptk.many rp=ia64_switch_mode_phys .ret1: mov rp = r8 // install return address (physical) br.cond.sptk.many b7 1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // r16= original psr - br.call.sptk.many rp=ia64_switch_mode // return to virtual mode + br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode .ret2: mov psr.l = loc3 // restore init PSR @@ -228,13 +228,13 @@ mov b7 = loc2 // install target to branch reg ;; andcm r16=loc3,r16 // removes bits to clear from psr - br.call.sptk.many rp=ia64_switch_mode + br.call.sptk.many rp=ia64_switch_mode_phys .ret6: br.call.sptk.many rp=b7 // now make the call .ret7: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // r16= original psr - br.call.sptk.many rp=ia64_switch_mode // return to virtual mode + br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode .ret8: mov psr.l = loc3 // restore init PSR mov ar.pfs = loc1 diff -ru l2567-ref/arch/ia64/kernel/setup.c l2567-reloc/arch/ia64/kernel/setup.c --- l2567-ref/arch/ia64/kernel/setup.c Thu May 1 10:19:28 2003 +++ l2567-reloc/arch/ia64/kernel/setup.c Tue May 13 11:26:12 2003 @@ -286,8 +286,8 @@ + strlen(__va(ia64_boot_param->command_line)) + 1); n++; - rsvd_region[n].start = KERNEL_START; - rsvd_region[n].end = KERNEL_END; + rsvd_region[n].start 
= __imva(KERNEL_START); + rsvd_region[n].end = __imva(KERNEL_END); n++; #ifdef CONFIG_BLK_DEV_INITRD @@ -347,6 +347,47 @@ #endif } +/* + * There are two places in the performance critical path of + * the exception handling code where we need to know the physical + * address of the swapper_pg_dir structure. This routine + * patches the "movl" instructions to load the value needed. + */ +static void __init +patch_ivt_with_phys_swapper_pg_dir(void) +{ + extern char ia64_ivt_patch1[], ia64_ivt_patch2[]; + unsigned long spd = __tpa(swapper_pg_dir); + unsigned long *p; + + p = (unsigned long *)__imva(ia64_ivt_patch1); + + *p = (*p & 0x3fffffffffffUL) | + ((spd & 0x000000ffffc00000UL)<<24); + p++; + *p = (*p & 0xf000080fff800000UL) | + ((spd & 0x8000000000000000UL) >> 4) | + ((spd & 0x7fffff0000000000UL) >> 40) | + ((spd & 0x00000000001f0000UL) << 29) | + ((spd & 0x0000000000200000UL) << 23) | + ((spd & 0x000000000000ff80UL) << 43) | + ((spd & 0x000000000000007fUL) << 36); + + p = (unsigned long *)__imva(ia64_ivt_patch2); + + *p = (*p & 0x3fffffffffffUL) | + ((spd & 0x000000ffffc00000UL)<<24); + p++; + *p = (*p & 0xf000080fff800000UL) | + ((spd & 0x8000000000000000UL) >> 4) | + ((spd & 0x7fffff0000000000UL) >> 40) | + ((spd & 0x00000000001f0000UL) << 29) | + ((spd & 0x0000000000200000UL) << 23) | + ((spd & 0x000000000000ff80UL) << 43) | + ((spd & 0x000000000000007fUL) << 36); +} +#define PATCH_IVT() patch_ivt_with_phys_swapper_pg_dir() + void __init setup_arch (char **cmdline_p) { @@ -355,6 +396,8 @@ unw_init(); + PATCH_IVT(); + *cmdline_p = __va(ia64_boot_param->command_line); strncpy(saved_command_line, *cmdline_p, sizeof(saved_command_line)); saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; /* for safety */ @@ -715,7 +758,7 @@ if (current->mm) BUG(); - ia64_mmu_init(cpu_data); + ia64_mmu_init((void *)__imva(cpu_data)); #ifdef CONFIG_IA32_SUPPORT /* initialize global ia32 state - CR0 and CR4 */ diff -ru l2567-ref/arch/ia64/kernel/smpboot.c 
l2567-reloc/arch/ia64/kernel/smpboot.c --- l2567-ref/arch/ia64/kernel/smpboot.c Thu May 1 10:19:28 2003 +++ l2567-reloc/arch/ia64/kernel/smpboot.c Fri May 9 15:56:33 2003 @@ -574,7 +574,7 @@ /* Tell SAL where to drop the AP's. */ ap_startup = (struct fptr *) start_ap; sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ, - __pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0); + __tpa(ap_startup->fp), __tpa(ap_startup->gp), 0, 0, 0, 0); if (sal_ret < 0) printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n", ia64_sal_strerror(sal_ret)); diff -ru l2567-ref/arch/ia64/mm/init.c l2567-reloc/arch/ia64/mm/init.c --- l2567-ref/arch/ia64/mm/init.c Thu May 1 10:19:28 2003 +++ l2567-reloc/arch/ia64/mm/init.c Tue May 13 11:22:40 2003 @@ -47,6 +47,8 @@ static int pgt_cache_water[2] = { 25, 50 }; +struct page *zero_page_memmap_ptr; /* map entry for zero page */ + void check_pgt_cache (void) { @@ -112,14 +114,16 @@ void free_initmem (void) { - unsigned long addr; + unsigned long addr, eaddr; - addr = (unsigned long) &__init_begin; - for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) { + addr = (unsigned long)__imva(&__init_begin); + eaddr = (unsigned long)__imva(&__init_end); + while (addr < eaddr) { ClearPageReserved(virt_to_page(addr)); set_page_count(virt_to_page(addr), 1); free_page(addr); ++totalram_pages; + addr += PAGE_SIZE; } printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n", (&__init_end - &__init_begin) >> 10); @@ -270,7 +274,7 @@ void __init ia64_mmu_init (void *my_cpu_data) { - unsigned long psr, rid, pta, impl_va_bits; + unsigned long psr, pta, impl_va_bits; extern void __init tlb_init (void); #ifdef CONFIG_DISABLE_VHPT # define VHPT_ENABLE_BIT 0 @@ -278,21 +282,8 @@ # define VHPT_ENABLE_BIT 1 #endif - /* - * Set up the kernel identity mapping for regions 6 and 5. The mapping for region - * 7 is setup up in _start(). 
- */ + /* Pin mapping for percpu area into TLB */ psr = ia64_clear_ic(); - - rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET); - ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2)); - - rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START); - ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1); - - /* ensure rr6 is up-to-date before inserting the PERCPU_ADDR translation: */ - ia64_srlz_d(); - ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR, pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)), PERCPU_PAGE_SHIFT); @@ -495,6 +486,7 @@ discontig_paging_init(); efi_memmap_walk(count_pages, &num_physpages); + zero_page_memmap_ptr = virt_to_page(__imva(empty_zero_page)); } #else /* !CONFIG_DISCONTIGMEM */ void @@ -567,6 +559,7 @@ } free_area_init(zones_size); # endif /* !CONFIG_VIRTUAL_MEM_MAP */ + zero_page_memmap_ptr = virt_to_page(__imva(empty_zero_page)); } #endif /* !CONFIG_DISCONTIGMEM */ @@ -637,7 +630,7 @@ pgt_cache_water[1] = num_pgt_pages; /* install the gate page in the global page table: */ - put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR); + put_gate_page(virt_to_page(__imva(__start_gate_section)), GATE_ADDR); #ifdef CONFIG_IA32_SUPPORT ia32_gdt_init(); diff -ru l2567-ref/arch/ia64/vmlinux.lds.S l2567-reloc/arch/ia64/vmlinux.lds.S --- l2567-ref/arch/ia64/vmlinux.lds.S Mon Apr 7 10:32:27 2003 +++ l2567-reloc/arch/ia64/vmlinux.lds.S Tue May 13 08:44:27 2003 @@ -3,8 +3,9 @@ #include #include #include +#include -#define LOAD_OFFSET PAGE_OFFSET +#define LOAD_OFFSET KERNEL_START + KERNEL_TR_PAGE_SIZE #include OUTPUT_FORMAT("elf64-ia64-little") @@ -23,22 +24,22 @@ } v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */ - phys_start = _start - PAGE_OFFSET; + phys_start = _start - LOAD_OFFSET; . 
= KERNEL_START; _text = .; _stext = .; - .text : AT(ADDR(.text) - PAGE_OFFSET) + .text : AT(ADDR(.text) - LOAD_OFFSET) { *(.text.ivt) *(.text) } - .text2 : AT(ADDR(.text2) - PAGE_OFFSET) + .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } #ifdef CONFIG_SMP - .text.lock : AT(ADDR(.text.lock) - PAGE_OFFSET) + .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) } #endif _etext = .; @@ -47,14 +48,14 @@ /* Exception table */ . = ALIGN(16); - __ex_table : AT(ADDR(__ex_table) - PAGE_OFFSET) + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { __start___ex_table = .; *(__ex_table) __stop___ex_table = .; } - __mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - PAGE_OFFSET) + __mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - LOAD_OFFSET) { __start___mckinley_e9_bundles = .; *(__mckinley_e9_bundles) @@ -67,7 +68,7 @@ #if defined(CONFIG_IA64_GENERIC) /* Machine Vector */ . = ALIGN(16); - .machvec : AT(ADDR(.machvec) - PAGE_OFFSET) + .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) { machvec_start = .; *(.machvec) @@ -77,9 +78,9 @@ /* Unwind info & table: */ . = ALIGN(8); - .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET) + .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) { *(.IA_64.unwind_info*) } - .IA_64.unwind : AT(ADDR(.IA_64.unwind) - PAGE_OFFSET) + .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) { ia64_unw_start = .; *(.IA_64.unwind*) @@ -88,20 +89,20 @@ RODATA - .opd : AT(ADDR(.opd) - PAGE_OFFSET) + .opd : AT(ADDR(.opd) - LOAD_OFFSET) { *(.opd) } /* Initialization code and data: */ . 
= ALIGN(PAGE_SIZE); __init_begin = .; - .init.text : AT(ADDR(.init.text) - PAGE_OFFSET) + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { *(.init.text) } - .init.data : AT(ADDR(.init.data) - PAGE_OFFSET) + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) } - .init.ramfs : AT(ADDR(.init.ramfs) - PAGE_OFFSET) + .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { __initramfs_start = .; *(.init.ramfs) @@ -109,19 +110,19 @@ } . = ALIGN(16); - .init.setup : AT(ADDR(.init.setup) - PAGE_OFFSET) + .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { __setup_start = .; *(.init.setup) __setup_end = .; } - __param : AT(ADDR(__param) - PAGE_OFFSET) + __param : AT(ADDR(__param) - LOAD_OFFSET) { __start___param = .; *(__param) __stop___param = .; } - .initcall.init : AT(ADDR(.initcall.init) - PAGE_OFFSET) + .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { __initcall_start = .; *(.initcall1.init) @@ -134,17 +135,17 @@ __initcall_end = .; } __con_initcall_start = .; - .con_initcall.init : AT(ADDR(.con_initcall.init) - PAGE_OFFSET) + .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { *(.con_initcall.init) } __con_initcall_end = .; . = ALIGN(PAGE_SIZE); __init_end = .; /* The initial task and kernel stack */ - .data.init_task : AT(ADDR(.data.init_task) - PAGE_OFFSET) + .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { *(.data.init_task) } - .data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET) + .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { *(__special_page_section) __start_gate_section = .; *(.text.gate) @@ -152,13 +153,13 @@ } . = ALIGN(SMP_CACHE_BYTES); - .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET) + .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { *(.data.cacheline_aligned) } /* Per-cpu data: */ . 
= ALIGN(PERCPU_PAGE_SIZE); __phys_per_cpu_start = .; - .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - PAGE_OFFSET) + .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) { __per_cpu_start = .; *(.data.percpu) @@ -166,24 +167,24 @@ } . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */ - .data : AT(ADDR(.data) - PAGE_OFFSET) + .data : AT(ADDR(.data) - LOAD_OFFSET) { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } . = ALIGN(16); __gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */ - .got : AT(ADDR(.got) - PAGE_OFFSET) + .got : AT(ADDR(.got) - LOAD_OFFSET) { *(.got.plt) *(.got) } /* We want the small data sections together, so single-instruction offsets can access them all, and initialized data all before uninitialized, so we can shorten the on-disk segment size. */ - .sdata : AT(ADDR(.sdata) - PAGE_OFFSET) + .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { *(.sdata) } _edata = .; _bss = .; - .sbss : AT(ADDR(.sbss) - PAGE_OFFSET) + .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { *(.sbss) *(.scommon) } - .bss : AT(ADDR(.bss) - PAGE_OFFSET) + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { *(.bss) *(COMMON) } _end = .; diff -ru l2567-ref/include/asm-ia64/page.h l2567-reloc/include/asm-ia64/page.h --- l2567-ref/include/asm-ia64/page.h Thu May 1 10:19:29 2003 +++ l2567-reloc/include/asm-ia64/page.h Fri May 9 15:58:45 2003 @@ -118,6 +118,8 @@ */ #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) +#define __tpa(x) ({ia64_va _v; asm("tpa %0=%1" : "=r"(_v.l) : "r"(x)); _v.l;}) +#define __imva(x) ((long)__va(__tpa(x))) #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) diff -ru l2567-ref/include/asm-ia64/pgtable.h l2567-reloc/include/asm-ia64/pgtable.h --- l2567-ref/include/asm-ia64/pgtable.h Thu May 1 10:19:29 2003 +++ 
l2567-reloc/include/asm-ia64/pgtable.h Tue May 13 09:02:56 2003 @@ -205,7 +205,7 @@ #define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */ #define RGN_KERNEL 7 -#define VMALLOC_START (0xa000000000000000 + 3*PERCPU_PAGE_SIZE) +#define VMALLOC_START 0xa000000200000000 #define VMALLOC_VMADDR(x) ((unsigned long)(x)) #ifdef CONFIG_VIRTUAL_MEM_MAP # define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) @@ -448,7 +448,8 @@ * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) +extern struct page *zero_page_memmap_ptr; +#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr) /* We provide our own get_unmapped_area to cope with VA holes for userland */ #define HAVE_ARCH_UNMAPPED_AREA @@ -485,7 +486,6 @@ */ #define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M #define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT) -#define KERNEL_TR_PAGE_NUM ((KERNEL_START - PAGE_OFFSET) / KERNEL_TR_PAGE_SIZE) /* * No page table caches to initialise diff -ru l2567-ref/include/asm-ia64/system.h l2567-reloc/include/asm-ia64/system.h --- l2567-ref/include/asm-ia64/system.h Thu May 1 10:19:29 2003 +++ l2567-reloc/include/asm-ia64/system.h Fri May 9 17:56:14 2003 @@ -19,7 +19,7 @@ #include #include -#define KERNEL_START (PAGE_OFFSET + 68*1024*1024) +#define KERNEL_START (0xA000000100000000) /* 0xa000000000000000 - 0xa000000000000000+PERCPU_MAX_SIZE remain unmapped */ #define PERCPU_ADDR (0xa000000000000000 + PERCPU_PAGE_SIZE)