From: "Luck, Tony" <tony.luck@intel.com>
To: linux-ia64@vger.kernel.org
Subject: RE: [Linux-ia64] Re: [PATCH] head.S fix for unusual load addrs
Date: Thu, 15 May 2003 21:43:16 +0000 [thread overview]
Message-ID: <marc-linux-ia64-105590723705850@msgid-missing> (raw)
In-Reply-To: <marc-linux-ia64-105590723705550@msgid-missing>
[-- Attachment #1: Type: text/plain, Size: 2112 bytes --]
> It's a bit of "all of the above":
>
> - The name isn't all that great, but since it _is_ doing something
> rather strange, a strange name seems quite appropriate. The patch
> should definitely add a brief (one-liner?) explanation of what
> __imva() stands for and what it does. Also, I really dislike the
> underscore silliness; let's just use ia64_imva() or some such, so
> it's obvious that it is ia64-specific (there is no
> namespace-pollution issue as there would be, say, in a user-level
> library, so there is really no reason for using an underscore
> prefix).
>
> - the casting should be fixed
>
> - instead of aspirins, it might be good to add the ASCII art you
> posted recently in an appropriate place (either a header-file
> or perhaps a Documentation/ia64 file); of course, the picture
> you drew included text-replication, so we may want to hold off
> with this until that part of the patch is in, too
Ok. Here's a new patch (against a snapshot pulled from
http://lia64.bkbits.net:8080/linux-ia64-2.5 at about 9:30
this morning). Builds and boots SMP on Tiger.
The __tpa() and __imva() macros are gone (along with their accursed
double underscore prefixes).
Types are cleaned up somewhat, there may be a couple of questionable
casts, but this looks to be as close to clean as I can make it.
The pre-existing ia64_tpa() function takes a __u64 argument, and returns
a __u64 value ... which matches nicely with all the uses in mca.c, setup.c
and smpboot.c which all use and return "unsigned long" (which is close
enough to __u64 that the compiler doesn't complain).
I've added a new function ia64_imva() which takes a "void *" argument and
returns a "void *" ... which matches with most of the uses, there are some
places that want an "unsigned long" return, so I still have some typecasts.
There's a two-line comment explaining what it does.
No ascii art in this patch, it isn't quite at head-ache complexity
yet. But I'll definitely put some pictures in when we get to replication
patches.
-Tony
[-- Attachment #2: reloc2569.diff --]
[-- Type: application/octet-stream, Size: 27110 bytes --]
diff -ru l2569-mosberger/arch/ia64/kernel/efi_stub.S l2569-aegl/arch/ia64/kernel/efi_stub.S
--- l2569-mosberger/arch/ia64/kernel/efi_stub.S Thu May 15 10:12:08 2003
+++ l2569-aegl/arch/ia64/kernel/efi_stub.S Thu May 15 11:45:02 2003
@@ -62,7 +62,7 @@
mov b6=r2
;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret0: mov out4=in5
mov out0=in1
mov out1=in2
@@ -73,7 +73,7 @@
br.call.sptk.many rp=b6 // call the EFI function
.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2: mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc1
mov rp=loc0
diff -ru l2569-mosberger/arch/ia64/kernel/entry.S l2569-aegl/arch/ia64/kernel/entry.S
--- l2569-mosberger/arch/ia64/kernel/entry.S Thu May 15 10:12:08 2003
+++ l2569-aegl/arch/ia64/kernel/entry.S Thu May 15 11:45:02 2003
@@ -178,15 +178,12 @@
;;
st8 [r22]=sp // save kernel stack pointer of old task
shr.u r26=r20,IA64_GRANULE_SHIFT
- shr.u r17=r20,KERNEL_TR_PAGE_SHIFT
- ;;
- cmp.ne p6,p7=KERNEL_TR_PAGE_NUM,r17
adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
;;
/*
* If we've already mapped this task's page, we can skip doing it again.
*/
-(p6) cmp.eq p7,p6=r26,r27
+ cmp.eq p7,p6=r26,r27
(p6) br.cond.dpnt .map
;;
.done:
diff -ru l2569-mosberger/arch/ia64/kernel/head.S l2569-aegl/arch/ia64/kernel/head.S
--- l2569-mosberger/arch/ia64/kernel/head.S Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/kernel/head.S Thu May 15 11:45:02 2003
@@ -60,22 +60,42 @@
mov r4=r0
.body
- /*
- * Initialize the region register for region 7 and install a translation register
- * that maps the kernel's text and data:
- */
rsm psr.i | psr.ic
- mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET) << 8) | (IA64_GRANULE_SHIFT << 2))
;;
srlz.i
+ ;;
+ /*
+ * Initialize kernel region registers:
+ * rr[5]: VHPT enabled, page size = PAGE_SHIFT
+ * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+ * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+ */
+ mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+ movl r17=(5<<61)
+ mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+ movl r19=(6<<61)
+ mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+ movl r21=(7<<61)
+ ;;
+ mov rr[r17]=r16
+ mov rr[r19]=r18
+ mov rr[r21]=r20
+ ;;
+ /*
+ * Now pin mappings into the TLB for kernel text and data
+ */
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
;;
- mov rr[r17]=r16
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
- movl r18=((1 << KERNEL_TR_PAGE_SHIFT) | PAGE_KERNEL)
+ mov r3=ip
+ movl r18=PAGE_KERNEL
+ ;;
+ dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
+ ;;
+ or r18=r2,r18
;;
srlz.i
;;
@@ -113,16 +133,6 @@
mov ar.fpsr=r2
;;
-#ifdef CONFIG_IA64_EARLY_PRINTK
- mov r3=(6<<8) | (IA64_GRANULE_SHIFT<<2)
- movl r2=6<<61
- ;;
- mov rr[r2]=r3
- ;;
- srlz.i
- ;;
-#endif
-
#define isAP p2 // are we an Application Processor?
#define isBP p3 // are we the Bootstrap Processor?
@@ -143,12 +153,36 @@
movl r2=init_thread_union
cmp.eq isBP,isAP=r0,r0
#endif
- mov r16=KERNEL_TR_PAGE_NUM
;;
+ tpa r3=r2 // r3 == phys addr of task struct
+ // load mapping for stack (virtaddr in r2, physaddr in r3)
+ rsm psr.ic
+ movl r17=PAGE_KERNEL
+ ;;
+ srlz.d
+ dep r18=0,r3,0,12
+ ;;
+ or r18=r17,r18
+ dep r2=-1,r3,61,3 // IMVA of task
+ ;;
+ mov r17=rr[r2]
+ shr.u r16=r3,IA64_GRANULE_SHIFT
+ ;;
+ dep r17=0,r17,8,24
+ ;;
+ mov cr.itir=r17
+ mov cr.ifa=r2
+
+ mov r19=IA64_TR_CURRENT_STACK
+ ;;
+ itr.d dtr[r19]=r18
+ ;;
+ ssm psr.ic
+ srlz.d
+ ;;
// load the "current" pointer (r13) and ar.k6 with the current task
mov IA64_KR(CURRENT)=r2 // virtual address
- // initialize k4 to a safe value (64-128MB is mapped by TR_KERNEL)
mov IA64_KR(CURRENT_STACK)=r16
mov r13=r2
/*
@@ -665,14 +699,14 @@
END(__ia64_init_fpu)
/*
- * Switch execution mode from virtual to physical or vice versa.
+ * Switch execution mode from virtual to physical
*
* Inputs:
* r16 = new psr to establish
*
* Note: RSE must already be in enforced lazy mode
*/
-GLOBAL_ENTRY(ia64_switch_mode)
+GLOBAL_ENTRY(ia64_switch_mode_phys)
{
alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
@@ -682,35 +716,86 @@
{
flushrs // must be first insn in group
srlz.i
- shr.u r19=r15,61 // r19 <- top 3 bits of current IP
}
;;
mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode,r15
- xor r15=0x7,r19 // flip the region bits
+ add r3=1f-ia64_switch_mode_phys,r15
mov r17=ar.bsp
mov r14=rp // get return address into a general register
+ ;;
- // switch RSE backing store:
+ // going to physical mode, use tpa to translate virt->phys
+ tpa r17=r17
+ tpa r3=r3
+ tpa sp=sp
+ tpa r14=r14
;;
- dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
+
mov r18=ar.rnat // save ar.rnat
- ;;
mov ar.bspstore=r17 // this steps on ar.rnat
- dep r3=r15,r3,61,3 // make rfi return address physical or virtual
+ mov cr.iip=r3
+ mov cr.ifs=r0
;;
+ mov ar.rnat=r18 // restore ar.rnat
+ rfi // must be last insn in group
+ ;;
+1: mov rp=r14
+ br.ret.sptk.many rp
+END(ia64_switch_mode_phys)
+
+/*
+ * Switch execution mode from physical to virtual
+ *
+ * Inputs:
+ * r16 = new psr to establish
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode_virt)
+ {
+ alloc r2=ar.pfs,0,0,0,0
+ rsm psr.i | psr.ic // disable interrupts and interrupt collection
+ mov r15=ip
+ }
+ ;;
+ {
+ flushrs // must be first insn in group
+ srlz.i
+ }
+ ;;
+ mov cr.ipsr=r16 // set new PSR
+ add r3=1f-ia64_switch_mode_virt,r15
+
+ mov r17=ar.bsp
+ mov r14=rp // get return address into a general register
+ ;;
+
+ // going to virtual
+ // - for code addresses, set upper bits of addr to KERNEL_START
+ // - for stack addresses, set upper 3 bits to 0xe.... Don't change any of the
+ // lower bits since we want it to stay identity mapped
+ movl r18=KERNEL_START
+ dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+ dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+ dep r17=-1,r17,61,3
+ dep sp=-1,sp,61,3
+ ;;
+ or r3=r3,r18
+ or r14=r14,r18
+ ;;
+
+ mov r18=ar.rnat // save ar.rnat
+ mov ar.bspstore=r17 // this steps on ar.rnat
mov cr.iip=r3
mov cr.ifs=r0
- dep sp=r15,sp,61,3 // make stack pointer physical or virtual
;;
mov ar.rnat=r18 // restore ar.rnat
- dep r14=r15,r14,61,3 // make function return address physical or virtual
rfi // must be last insn in group
;;
1: mov rp=r14
br.ret.sptk.many rp
-END(ia64_switch_mode)
+END(ia64_switch_mode_virt)
#ifdef CONFIG_IA64_BRL_EMU
diff -ru l2569-mosberger/arch/ia64/kernel/ia64_ksyms.c l2569-aegl/arch/ia64/kernel/ia64_ksyms.c
--- l2569-mosberger/arch/ia64/kernel/ia64_ksyms.c Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/kernel/ia64_ksyms.c Thu May 15 11:45:02 2003
@@ -159,6 +159,7 @@
EXPORT_SYMBOL(ia64_mv);
#endif
EXPORT_SYMBOL(machvec_noop);
+EXPORT_SYMBOL(zero_page_memmap_ptr);
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
diff -ru l2569-mosberger/arch/ia64/kernel/ivt.S l2569-aegl/arch/ia64/kernel/ivt.S
--- l2569-mosberger/arch/ia64/kernel/ivt.S Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/kernel/ivt.S Thu May 15 11:45:02 2003
@@ -122,8 +122,13 @@
shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
+ .global ia64_ivt_patch1
+ia64_ivt_patch1:
+{ .mlx // we patch this bundle to include physical address of swapper_pg_dir
srlz.d // ensure "rsm psr.dt" has taken effect
-(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+(p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir
+}
+ .pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
@@ -415,8 +420,13 @@
shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
- srlz.d
-(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+ .global ia64_ivt_patch2
+ia64_ivt_patch2:
+{ .mlx // we patch this bundle to include physical address of swapper_pg_dir
+ srlz.d // ensure "rsm psr.dt" has taken effect
+(p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir
+}
+ .pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
diff -ru l2569-mosberger/arch/ia64/kernel/mca.c l2569-aegl/arch/ia64/kernel/mca.c
--- l2569-mosberger/arch/ia64/kernel/mca.c Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/kernel/mca.c Thu May 15 13:19:31 2003
@@ -662,17 +662,17 @@
IA64_MCA_DEBUG("ia64_mca_init: registered mca rendezvous spinloop and wakeup mech.\n");
- ia64_mc_info.imi_mca_handler = __pa(mca_hldlr_ptr->fp);
+ ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
/*
* XXX - disable SAL checksum by setting size to 0; should be
- * __pa(ia64_os_mca_dispatch_end) - __pa(ia64_os_mca_dispatch);
+ * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
*/
ia64_mc_info.imi_mca_handler_size = 0;
/* Register the os mca handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
ia64_mc_info.imi_mca_handler,
- mca_hldlr_ptr->gp,
+ ia64_tpa(mca_hldlr_ptr->gp),
ia64_mc_info.imi_mca_handler_size,
0, 0, 0)))
{
@@ -682,15 +682,15 @@
}
IA64_MCA_DEBUG("ia64_mca_init: registered os mca handler with SAL at 0x%lx, gp = 0x%lx\n",
- ia64_mc_info.imi_mca_handler, mca_hldlr_ptr->gp);
+ ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
/*
* XXX - disable SAL checksum by setting size to 0, should be
* IA64_INIT_HANDLER_SIZE
*/
- ia64_mc_info.imi_monarch_init_handler = __pa(mon_init_ptr->fp);
+ ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
ia64_mc_info.imi_monarch_init_handler_size = 0;
- ia64_mc_info.imi_slave_init_handler = __pa(slave_init_ptr->fp);
+ ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
ia64_mc_info.imi_slave_init_handler_size = 0;
IA64_MCA_DEBUG("ia64_mca_init: os init handler at %lx\n",
@@ -699,10 +699,10 @@
/* Register the os init handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
ia64_mc_info.imi_monarch_init_handler,
- __pa(ia64_get_gp()),
+ ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_monarch_init_handler_size,
ia64_mc_info.imi_slave_init_handler,
- __pa(ia64_get_gp()),
+ ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_slave_init_handler_size)))
{
printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
diff -ru l2569-mosberger/arch/ia64/kernel/pal.S l2569-aegl/arch/ia64/kernel/pal.S
--- l2569-mosberger/arch/ia64/kernel/pal.S Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/kernel/pal.S Thu May 15 11:45:02 2003
@@ -164,7 +164,7 @@
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
- dep.z r8=r8,0,61 // convert rp to physical
+ tpa r8=r8 // convert rp to physical
;;
mov b7 = loc2 // install target to branch reg
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
@@ -174,13 +174,13 @@
or loc3=loc3,r17 // add in psr the bits to set
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret1: mov rp = r8 // install return address (physical)
br.cond.sptk.many b7
1:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2:
mov psr.l = loc3 // restore init PSR
@@ -228,13 +228,13 @@
mov b7 = loc2 // install target to branch reg
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret6:
br.call.sptk.many rp=b7 // now make the call
.ret7:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret8: mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
diff -ru l2569-mosberger/arch/ia64/kernel/setup.c l2569-aegl/arch/ia64/kernel/setup.c
--- l2569-mosberger/arch/ia64/kernel/setup.c Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/kernel/setup.c Thu May 15 13:47:44 2003
@@ -265,7 +265,7 @@
static void
find_memory (void)
{
-# define KERNEL_END ((unsigned long) &_end)
+# define KERNEL_END (&_end)
unsigned long bootmap_size;
unsigned long max_pfn;
int n = 0;
@@ -286,8 +286,8 @@
+ strlen(__va(ia64_boot_param->command_line)) + 1);
n++;
- rsvd_region[n].start = KERNEL_START;
- rsvd_region[n].end = KERNEL_END;
+ rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+ rsvd_region[n].end = (unsigned long) ia64_imva(KERNEL_END);
n++;
#ifdef CONFIG_BLK_DEV_INITRD
@@ -347,6 +347,47 @@
#endif
}
+/*
+ * There are two places in the performance critical path of
+ * the exception handling code where we need to know the physical
+ * address of the swapper_pg_dir structure. This routine
+ * patches the "movl" instructions to load the value needed.
+ */
+static void __init
+patch_ivt_with_phys_swapper_pg_dir(void)
+{
+ extern char ia64_ivt_patch1[], ia64_ivt_patch2[];
+ unsigned long spd = ia64_tpa((__u64)swapper_pg_dir);
+ unsigned long *p;
+
+ p = (unsigned long *)ia64_imva(ia64_ivt_patch1);
+
+ *p = (*p & 0x3fffffffffffUL) |
+ ((spd & 0x000000ffffc00000UL)<<24);
+ p++;
+ *p = (*p & 0xf000080fff800000UL) |
+ ((spd & 0x8000000000000000UL) >> 4) |
+ ((spd & 0x7fffff0000000000UL) >> 40) |
+ ((spd & 0x00000000001f0000UL) << 29) |
+ ((spd & 0x0000000000200000UL) << 23) |
+ ((spd & 0x000000000000ff80UL) << 43) |
+ ((spd & 0x000000000000007fUL) << 36);
+
+ p = (unsigned long *)ia64_imva(ia64_ivt_patch2);
+
+ *p = (*p & 0x3fffffffffffUL) |
+ ((spd & 0x000000ffffc00000UL)<<24);
+ p++;
+ *p = (*p & 0xf000080fff800000UL) |
+ ((spd & 0x8000000000000000UL) >> 4) |
+ ((spd & 0x7fffff0000000000UL) >> 40) |
+ ((spd & 0x00000000001f0000UL) << 29) |
+ ((spd & 0x0000000000200000UL) << 23) |
+ ((spd & 0x000000000000ff80UL) << 43) |
+ ((spd & 0x000000000000007fUL) << 36);
+}
+#define PATCH_IVT() patch_ivt_with_phys_swapper_pg_dir()
+
void __init
setup_arch (char **cmdline_p)
{
@@ -355,6 +396,8 @@
unw_init();
+ PATCH_IVT();
+
*cmdline_p = __va(ia64_boot_param->command_line);
strncpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; /* for safety */
@@ -755,7 +798,7 @@
if (current->mm)
BUG();
- ia64_mmu_init(cpu_data);
+ ia64_mmu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
/* initialize global ia32 state - CR0 and CR4 */
diff -ru l2569-mosberger/arch/ia64/kernel/smpboot.c l2569-aegl/arch/ia64/kernel/smpboot.c
--- l2569-mosberger/arch/ia64/kernel/smpboot.c Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/kernel/smpboot.c Thu May 15 13:22:14 2003
@@ -598,7 +598,7 @@
/* Tell SAL where to drop the AP's. */
ap_startup = (struct fptr *) start_ap;
sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
- __pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0);
+ ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
if (sal_ret < 0)
printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
ia64_sal_strerror(sal_ret));
diff -ru l2569-mosberger/arch/ia64/mm/init.c l2569-aegl/arch/ia64/mm/init.c
--- l2569-mosberger/arch/ia64/mm/init.c Thu May 15 10:12:09 2003
+++ l2569-aegl/arch/ia64/mm/init.c Thu May 15 14:09:55 2003
@@ -47,6 +47,8 @@
static int pgt_cache_water[2] = { 25, 50 };
+struct page *zero_page_memmap_ptr; /* map entry for zero page */
+
void
check_pgt_cache (void)
{
@@ -112,14 +114,16 @@
void
free_initmem (void)
{
- unsigned long addr;
+ unsigned long addr, eaddr;
- addr = (unsigned long) &__init_begin;
- for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
+ addr = (unsigned long) ia64_imva(&__init_begin);
+ eaddr = (unsigned long) ia64_imva(&__init_end);
+ while (addr < eaddr) {
ClearPageReserved(virt_to_page(addr));
set_page_count(virt_to_page(addr), 1);
free_page(addr);
++totalram_pages;
+ addr += PAGE_SIZE;
}
printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
(&__init_end - &__init_begin) >> 10);
@@ -269,7 +273,7 @@
void __init
ia64_mmu_init (void *my_cpu_data)
{
- unsigned long psr, rid, pta, impl_va_bits;
+ unsigned long psr, pta, impl_va_bits;
extern void __init tlb_init (void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
@@ -277,21 +281,8 @@
# define VHPT_ENABLE_BIT 1
#endif
- /*
- * Set up the kernel identity mapping for regions 6 and 5. The mapping for region
- * 7 is setup up in _start().
- */
+ /* Pin mapping for percpu area into TLB */
psr = ia64_clear_ic();
-
- rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
- ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2));
-
- rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START);
- ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1);
-
- /* ensure rr6 is up-to-date before inserting the PERCPU_ADDR translation: */
- ia64_srlz_d();
-
ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
PERCPU_PAGE_SHIFT);
@@ -489,6 +480,7 @@
discontig_paging_init();
efi_memmap_walk(count_pages, &num_physpages);
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#else /* !CONFIG_DISCONTIGMEM */
void
@@ -560,6 +552,7 @@
}
free_area_init(zones_size);
# endif /* !CONFIG_VIRTUAL_MEM_MAP */
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#endif /* !CONFIG_DISCONTIGMEM */
@@ -630,7 +623,7 @@
pgt_cache_water[1] = num_pgt_pages;
/* install the gate page in the global page table: */
- put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
+ put_gate_page(virt_to_page(ia64_imva(__start_gate_section)), GATE_ADDR);
#ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init();
diff -ru l2569-mosberger/arch/ia64/vmlinux.lds.S l2569-aegl/arch/ia64/vmlinux.lds.S
--- l2569-mosberger/arch/ia64/vmlinux.lds.S Thu May 15 10:12:08 2003
+++ l2569-aegl/arch/ia64/vmlinux.lds.S Thu May 15 11:46:25 2003
@@ -3,8 +3,9 @@
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
-#define LOAD_OFFSET PAGE_OFFSET
+#define LOAD_OFFSET KERNEL_START + KERNEL_TR_PAGE_SIZE
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-ia64-little")
@@ -23,22 +24,22 @@
}
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
- phys_start = _start - PAGE_OFFSET;
+ phys_start = _start - LOAD_OFFSET;
. = KERNEL_START;
_text = .;
_stext = .;
- .text : AT(ADDR(.text) - PAGE_OFFSET)
+ .text : AT(ADDR(.text) - LOAD_OFFSET)
{
*(.text.ivt)
*(.text)
}
- .text2 : AT(ADDR(.text2) - PAGE_OFFSET)
+ .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
- .text.lock : AT(ADDR(.text.lock) - PAGE_OFFSET)
+ .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
{ *(.text.lock) }
#endif
_etext = .;
@@ -47,14 +48,14 @@
/* Exception table */
. = ALIGN(16);
- __ex_table : AT(ADDR(__ex_table) - PAGE_OFFSET)
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
{
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
- __mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - PAGE_OFFSET)
+ __mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
*(__mckinley_e9_bundles)
@@ -67,7 +68,7 @@
#if defined(CONFIG_IA64_GENERIC)
/* Machine Vector */
. = ALIGN(16);
- .machvec : AT(ADDR(.machvec) - PAGE_OFFSET)
+ .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
{
machvec_start = .;
*(.machvec)
@@ -77,9 +78,9 @@
/* Unwind info & table: */
. = ALIGN(8);
- .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
+ .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
{ *(.IA_64.unwind_info*) }
- .IA_64.unwind : AT(ADDR(.IA_64.unwind) - PAGE_OFFSET)
+ .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
{
ia64_unw_start = .;
*(.IA_64.unwind*)
@@ -88,24 +89,24 @@
RODATA
- .opd : AT(ADDR(.opd) - PAGE_OFFSET)
+ .opd : AT(ADDR(.opd) - LOAD_OFFSET)
{ *(.opd) }
/* Initialization code and data: */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
- .init.text : AT(ADDR(.init.text) - PAGE_OFFSET)
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
{
_sinittext = .;
*(.init.text)
_einittext = .;
}
- .init.data : AT(ADDR(.init.data) - PAGE_OFFSET)
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
{ *(.init.data) }
- .init.ramfs : AT(ADDR(.init.ramfs) - PAGE_OFFSET)
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
{
__initramfs_start = .;
*(.init.ramfs)
@@ -113,19 +114,19 @@
}
. = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - PAGE_OFFSET)
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
{
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
- __param : AT(ADDR(__param) - PAGE_OFFSET)
+ __param : AT(ADDR(__param) - LOAD_OFFSET)
{
__start___param = .;
*(__param)
__stop___param = .;
}
- .initcall.init : AT(ADDR(.initcall.init) - PAGE_OFFSET)
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
{
__initcall_start = .;
*(.initcall1.init)
@@ -138,17 +139,17 @@
__initcall_end = .;
}
__con_initcall_start = .;
- .con_initcall.init : AT(ADDR(.con_initcall.init) - PAGE_OFFSET)
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }
__con_initcall_end = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* The initial task and kernel stack */
- .data.init_task : AT(ADDR(.data.init_task) - PAGE_OFFSET)
+ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
{ *(.data.init_task) }
- .data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET)
+ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
{ *(__special_page_section)
__start_gate_section = .;
*(.text.gate)
@@ -156,13 +157,13 @@
}
. = ALIGN(SMP_CACHE_BYTES);
- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
+ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
{ *(.data.cacheline_aligned) }
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
__phys_per_cpu_start = .;
- .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - PAGE_OFFSET)
+ .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu)
@@ -170,24 +171,24 @@
}
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
- .data : AT(ADDR(.data) - PAGE_OFFSET)
+ .data : AT(ADDR(.data) - LOAD_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
- .got : AT(ADDR(.got) - PAGE_OFFSET)
+ .got : AT(ADDR(.got) - LOAD_OFFSET)
{ *(.got.plt) *(.got) }
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
- .sdata : AT(ADDR(.sdata) - PAGE_OFFSET)
+ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) }
_edata = .;
_bss = .;
- .sbss : AT(ADDR(.sbss) - PAGE_OFFSET)
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
{ *(.sbss) *(.scommon) }
- .bss : AT(ADDR(.bss) - PAGE_OFFSET)
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET)
{ *(.bss) *(COMMON) }
_end = .;
diff -ru l2569-mosberger/include/asm-ia64/pgtable.h l2569-aegl/include/asm-ia64/pgtable.h
--- l2569-mosberger/include/asm-ia64/pgtable.h Thu May 15 10:13:00 2003
+++ l2569-aegl/include/asm-ia64/pgtable.h Thu May 15 11:45:02 2003
@@ -207,7 +207,7 @@
#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
#define RGN_KERNEL 7
-#define VMALLOC_START (0xa000000000000000 + 3*PERCPU_PAGE_SIZE)
+#define VMALLOC_START 0xa000000200000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
@@ -450,7 +450,8 @@
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+extern struct page *zero_page_memmap_ptr;
+#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
@@ -481,7 +482,6 @@
*/
#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
-#define KERNEL_TR_PAGE_NUM ((KERNEL_START - PAGE_OFFSET) / KERNEL_TR_PAGE_SIZE)
/*
* No page table caches to initialise
diff -ru l2569-mosberger/include/asm-ia64/processor.h l2569-aegl/include/asm-ia64/processor.h
--- l2569-mosberger/include/asm-ia64/processor.h Thu May 15 10:13:00 2003
+++ l2569-aegl/include/asm-ia64/processor.h Thu May 15 13:42:17 2003
@@ -929,6 +929,18 @@
return result;
}
+/*
+ * Take a mapped kernel address and return the equivalent address
+ * in the region 7 identity mapped virtual area.
+ */
+static inline void *
+ia64_imva (void *addr)
+{
+ void *result;
+ asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
+ return __va(result);
+}
+
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
diff -ru l2569-mosberger/include/asm-ia64/system.h l2569-aegl/include/asm-ia64/system.h
--- l2569-mosberger/include/asm-ia64/system.h Thu May 15 10:13:00 2003
+++ l2569-aegl/include/asm-ia64/system.h Thu May 15 11:45:02 2003
@@ -19,7 +19,7 @@
#include <asm/pal.h>
#include <asm/percpu.h>
-#define KERNEL_START (PAGE_OFFSET + 68*1024*1024)
+#define KERNEL_START (0xA000000100000000)
/* 0xa000000000000000 - 0xa000000000000000+PERCPU_MAX_SIZE remain unmapped */
#define PERCPU_ADDR (0xa000000000000000 + PERCPU_PAGE_SIZE)
next prev parent reply other threads:[~2003-05-15 21:43 UTC|newest]
Thread overview: 60+ messages / expand[flat|nested] mbox.gz Atom feed top
2003-04-17 23:05 [Linux-ia64] Re: [PATCH] head.S fix for unusual load addrs David Mosberger
2003-04-17 23:57 ` Jesse Barnes
2003-04-25 21:02 ` Jesse Barnes
2003-05-07 22:39 ` David Mosberger
2003-05-07 23:24 ` Luck, Tony
2003-05-07 23:51 ` David Mosberger
2003-05-08 0:00 ` Jesse Barnes
2003-05-08 0:04 ` Jesse Barnes
2003-05-08 0:07 ` Luck, Tony
2003-05-08 0:13 ` Keith Owens
2003-05-08 0:21 ` David Mosberger
2003-05-08 0:23 ` David Mosberger
2003-05-08 0:24 ` Keith Owens
2003-05-08 0:54 ` David Mosberger
2003-05-08 1:07 ` David Mosberger
2003-05-08 1:46 ` Jesse Barnes
2003-05-08 1:55 ` Keith Owens
2003-05-08 2:16 ` Keith Owens
2003-05-08 4:59 ` David Mosberger
2003-05-08 16:07 ` Jesse Barnes
2003-05-08 17:07 ` David Mosberger
2003-05-08 17:20 ` Jesse Barnes
2003-05-08 17:50 ` David Mosberger
2003-05-08 17:54 ` Luck, Tony
2003-05-08 20:29 ` David Mosberger
2003-05-08 22:17 ` Keith Owens
2003-05-08 22:27 ` Luck, Tony
2003-05-08 22:31 ` Jesse Barnes
2003-05-08 22:53 ` David Mosberger
2003-05-08 23:32 ` David Mosberger
2003-05-09 0:01 ` Jesse Barnes
2003-05-09 0:11 ` Jesse Barnes
2003-05-09 17:52 ` Jesse Barnes
2003-05-09 18:25 ` David Mosberger
2003-05-09 19:30 ` Jesse Barnes
2003-05-09 19:31 ` Jack Steiner
2003-05-09 20:02 ` Jack Steiner
2003-05-09 20:25 ` David Mosberger
2003-05-09 21:43 ` Luck, Tony
2003-05-10 2:39 ` Jack Steiner
2003-05-13 22:18 ` Luck, Tony
2003-05-14 1:24 ` Jesse Barnes
2003-05-14 5:29 ` Christian Hildner
2003-05-14 16:44 ` Luck, Tony
2003-05-15 3:05 ` David Mosberger
2003-05-15 16:33 ` Luck, Tony
2003-05-15 18:03 ` Jack Steiner
2003-05-15 18:59 ` David Mosberger
2003-05-15 21:43 ` Luck, Tony [this message]
2003-05-16 22:33 ` Luck, Tony
2003-05-16 22:47 ` David Mosberger
2003-05-16 22:54 ` [Linux-ia64] " Luck, Tony
2003-05-16 22:58 ` David Mosberger
2003-05-19 17:57 ` Luck, Tony
2003-05-19 18:02 ` Jesse Barnes
2003-05-19 18:39 ` David Mosberger
2003-05-19 19:07 ` Luck, Tony
2003-05-28 19:10 ` Luck, Tony
2003-05-28 20:05 ` Luck, Tony
2003-05-28 20:13 ` Luck, Tony
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=marc-linux-ia64-105590723705850@msgid-missing \
--to=tony.luck@intel.com \
--cc=linux-ia64@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox