linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH 0/5] powerpc: A bit more way paved toward 64-bit Book3E support
From: Benjamin Herrenschmidt @ 2009-06-02  7:50 UTC (permalink / raw)
  To: linuxppc-dev

These are a few patches, extracted or cleaned up from the current
64-bit Book3E support pile, that could already be merged in 2.6.31
to make the subsequent patches easier to maintain and apply.


* [PATCH 1/5] powerpc: Move VMX and VSX asm code to vector.S
From: Benjamin Herrenschmidt @ 2009-06-02  7:50 UTC (permalink / raw)
  To: linuxppc-dev

Currently, load_up_altivec and giveup_altivec are duplicated between
the 32-bit and 64-bit kernels. This creates a common implementation,
moved out of head_32.S, head_64.S and misc_64.S into vector.S, using
the same macros we already use for our common implementation of
load_up_fpu.

I also moved the VSX code over to vector.S, though in that case I
didn't make it build on 32-bit (yet).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
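[ Note for readers, not part of the patch: the 32/64-bit abstraction
  used in the new vector.S relies on the existing helpers from
  asm/asm-compat.h (pulled in via ppc_asm.h).  A rough sketch of the
  relevant definitions, quoted from memory rather than from this
  series, looks like this:

	#ifdef __powerpc64__
	/* 64-bit builds: doubleword load/store/compare forms */
	#define PPC_LL		stringify_in_c(ld)
	#define PPC_STL		stringify_in_c(std)
	#define PPC_LCMPI	stringify_in_c(cmpdi)
	#else
	/* 32-bit builds: word load/store/compare forms */
	#define PPC_LL		stringify_in_c(lwz)
	#define PPC_STL		stringify_in_c(stw)
	#define PPC_LCMPI	stringify_in_c(cmpwi)
	#endif

  Together with LOAD_REG_ADDRBASE/ADDROFF and toreal/fromreal from
  ppc_asm.h, this is what lets the same vector.S source assemble for
  both word sizes. ]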

 arch/powerpc/Makefile         |    1 
 arch/powerpc/kernel/Makefile  |    3 
 arch/powerpc/kernel/head_32.S |   95 -------------------
 arch/powerpc/kernel/head_64.S |  118 -----------------------
 arch/powerpc/kernel/misc_64.S |   92 ------------------
 arch/powerpc/kernel/vector.S  |  210 ++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 213 insertions(+), 306 deletions(-)

--- linux-work.orig/arch/powerpc/kernel/vector.S	2009-06-02 15:37:39.000000000 +1000
+++ linux-work/arch/powerpc/kernel/vector.S	2009-06-02 16:22:03.000000000 +1000
@@ -1,5 +1,215 @@
+#include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ */
+_GLOBAL(load_up_altivec)
+	mfmsr	r5			/* grab the current MSR */
+	oris	r5,r5,MSR_VEC@h
+	MTMSRD(r5)			/* enable use of AltiVec now */
+	isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altvec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
+	toreal(r3)
+	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
+	PPC_LCMPI	0,r4,0
+	beq	1f
+
+	/* Save VMX state to last_task_used_altivec's THREAD struct */
+	toreal(r4)
+	addi	r4,r4,THREAD
+	SAVE_32VRS(0,r5,r4)
+	mfvscr	vr0
+	li	r10,THREAD_VSCR
+	stvx	vr0,r10,r4
+	/* Disable VMX for last_task_used_altivec */
+	PPC_LL	r5,PT_REGS(r4)
+	toreal(r5)
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r10,MSR_VEC@h
+	andc	r4,r4,r10
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+
+	/* Hack: if we get an altivec unavailable trap with VRSAVE
+	 * set to all zeros, we assume this is a broken application
+	 * that fails to set it properly, and thus we switch it to
+	 * all 1's
+	 */
+	mfspr	r4,SPRN_VRSAVE
+	cmpdi	0,r4,0
+	bne+	1f
+	li	r4,-1
+	mtspr	SPRN_VRSAVE,r4
+1:
+	/* enable use of VMX after return */
+#ifdef CONFIG_PPC32
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	oris	r9,r9,MSR_VEC@h
+#else
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	oris	r12,r12,MSR_VEC@h
+	std	r12,_MSR(r1)
+#endif
+	li	r4,1
+	li	r10,THREAD_VSCR
+	stw	r4,THREAD_USED_VR(r5)
+	lvx	vr0,r10,r5
+	mtvscr	vr0
+	REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	subi	r4,r5,THREAD		/* Back to 'current' */
+	fromreal(r4)
+	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	blr
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+	mfmsr	r5
+	oris	r5,r5,MSR_VEC@h
+	SYNC
+	MTMSRD(r5)			/* enable use of VMX now */
+	isync
+	PPC_LCMPI	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	PPC_LL	r5,PT_REGS(r3)
+	PPC_LCMPI	0,r5,0
+	SAVE_32VRS(0,r4,r3)
+	mfvscr	vr0
+	li	r4,THREAD_VSCR
+	stvx	vr0,r4,r3
+	beq	1f
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	lis	r3,(MSR_VEC|MSR_VSX)@h
+FTR_SECTION_ELSE
+	lis	r3,MSR_VEC@h
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#else
+ 	lis	r3,MSR_VEC@h
+#endif
+	andc	r4,r4,r3		/* disable FP for previous task */
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
+	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#ifdef CONFIG_VSX
+
+#ifdef CONFIG_PPC32
+#error This asm code isn't ready for 32-bit kernels
+#endif
+
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse the fp and vsx saves, but first check to see if they have
+ * been saved already.
+ */
+_GLOBAL(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+	andi.	r5,r12,MSR_FP
+	beql+	load_up_fpu		/* skip if already loaded */
+	andis.	r5,r12,MSR_VEC@h
+	beql+	load_up_altivec		/* skip if already loaded */
+
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_vsx@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Disable VSX for last_task_used_vsx */
+	addi	r4,r4,THREAD
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r6,MSR_VSX@h
+	andc	r6,r4,r6
+	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	ld	r4,PACACURRENT(r13)
+	addi	r4,r4,THREAD		/* Get THREAD */
+	li	r6,1
+	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
+	/* enable use of VSX after return */
+	oris	r12,r12,MSR_VSX@h
+	std	r12,_MSR(r1)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	ld	r4,PACACURRENT(r13)
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	b	fast_exception_return
+
+/*
+ * __giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument.
+ * Does NOT save vsx registers.
+ * Enables the VSX for use in the kernel on return.
+ */
+_GLOBAL(__giveup_vsx)
+	mfmsr	r5
+	oris	r5,r5,MSR_VSX@h
+	mtmsrd	r5			/* enable use of VSX now */
+	isync
+
+	cmpdi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	ld	r5,PT_REGS(r3)
+	cmpdi	0,r5,0
+	beq	1f
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_VSX@h
+	andc	r4,r4,r3		/* disable VSX for previous task */
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	ld	r4,last_task_used_vsx@got(r2)
+	std	r5,0(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#endif /* CONFIG_VSX */
+
 
 /*
  * The routines below are in assembler so we can closely control the
Index: linux-work/arch/powerpc/kernel/head_32.S
===================================================================
--- linux-work.orig/arch/powerpc/kernel/head_32.S	2009-06-02 15:37:39.000000000 +1000
+++ linux-work/arch/powerpc/kernel/head_32.S	2009-06-02 15:38:31.000000000 +1000
@@ -743,101 +743,6 @@ PerformanceMonitor:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	EXC_XFER_STD(0xf00, performance_monitor_exception)
 
-#ifdef CONFIG_ALTIVEC
-/* Note that the AltiVec support is closely modeled after the FP
- * support.  Changes to one are likely to be applicable to the
- * other!  */
-load_up_altivec:
-/*
- * Disable AltiVec for the task which had AltiVec previously,
- * and save its AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- * On SMP we know the AltiVec units are free, since we give it up every
- * switch.  -- Kumar
- */
-	mfmsr	r5
-	oris	r5,r5,MSR_VEC@h
-	MTMSRD(r5)			/* enable use of AltiVec now */
-	isync
-/*
- * For SMP, we don't do lazy AltiVec switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altivec in switch_to.
- */
-#ifndef CONFIG_SMP
-	tophys(r6,0)
-	addis	r3,r6,last_task_used_altivec@ha
-	lwz	r4,last_task_used_altivec@l(r3)
-	cmpwi	0,r4,0
-	beq	1f
-	add	r4,r4,r6
-	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
-	SAVE_32VRS(0,r10,r4)
-	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
-	lwz	r5,PT_REGS(r4)
-	add	r5,r5,r6
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r10,MSR_VEC@h
-	andc	r4,r4,r10	/* disable altivec for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of AltiVec after return */
-	oris	r9,r9,MSR_VEC@h
-	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
-	li	r4,1
-	li	r10,THREAD_VSCR
-	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
-	mtvscr	vr0
-	REST_32VRS(0,r10,r5)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	sub	r4,r4,r6
-	stw	r4,last_task_used_altivec@l(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	/* we haven't used ctr or xer or lr */
-	b	fast_exception_return
-
-/*
- * giveup_altivec(tsk)
- * Disable AltiVec for the task given as the argument,
- * and save the AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- */
-
-	.globl	giveup_altivec
-giveup_altivec:
-	mfmsr	r5
-	oris	r5,r5,MSR_VEC@h
-	SYNC
-	MTMSRD(r5)			/* enable use of AltiVec now */
-	isync
-	cmpwi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	lwz	r5,PT_REGS(r3)
-	cmpwi	0,r5,0
-	SAVE_32VRS(0, r4, r3)
-	mfvscr	vr0
-	li	r4,THREAD_VSCR
-	stvx	vr0,r4,r3
-	beq	1f
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r3,MSR_VEC@h
-	andc	r4,r4,r3		/* disable AltiVec for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	lis	r4,last_task_used_altivec@ha
-	stw	r5,last_task_used_altivec@l(r4)
-#endif /* CONFIG_SMP */
-	blr
-#endif /* CONFIG_ALTIVEC */
 
 /*
  * This code is jumped to from the startup code to copy
Index: linux-work/arch/powerpc/kernel/head_64.S
===================================================================
--- linux-work.orig/arch/powerpc/kernel/head_64.S	2009-06-02 15:37:39.000000000 +1000
+++ linux-work/arch/powerpc/kernel/head_64.S	2009-06-02 17:13:42.000000000 +1000
@@ -844,124 +844,6 @@ unrecov_fer:
 	bl	.unrecoverable_exception
 	b	1b
 
-#ifdef CONFIG_ALTIVEC
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-	mfmsr	r5			/* grab the current MSR */
-	oris	r5,r5,MSR_VEC@h
-	mtmsrd	r5			/* enable use of VMX now */
-	isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_altivec@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save VMX state to last_task_used_altivec's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32VRS(0,r5,r4)
-	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
-	/* Disable VMX for last_task_used_altivec */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VEC@h
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* Hack: if we get an altivec unavailable trap with VRSAVE
-	 * set to all zeros, we assume this is a broken application
-	 * that fails to set it properly, and thus we switch it to
-	 * all 1's
-	 */
-	mfspr	r4,SPRN_VRSAVE
-	cmpdi	0,r4,0
-	bne+	1f
-	li	r4,-1
-	mtspr	SPRN_VRSAVE,r4
-1:
-	/* enable use of VMX after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	oris	r12,r12,MSR_VEC@h
-	std	r12,_MSR(r1)
-	li	r4,1
-	li	r10,THREAD_VSCR
-	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
-	mtvscr	vr0
-	REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	blr
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * load_up_vsx(unused, unused, tsk)
- * Disable VSX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Reuse the fp and vsx saves, but first check to see if they have
- * been saved already.
- * On entry: r13 == 'current' && last_task_used_vsx != 'current'
- */
-_STATIC(load_up_vsx)
-/* Load FP and VSX registers if they haven't been done yet */
-	andi.	r5,r12,MSR_FP
-	beql+	load_up_fpu		/* skip if already loaded */
-	andis.	r5,r12,MSR_VEC@h
-	beql+	load_up_altivec		/* skip if already loaded */
-
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_vsx@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Disable VSX for last_task_used_vsx */
-	addi	r4,r4,THREAD
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VSX@h
-	andc	r6,r4,r6
-	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	ld	r4,PACACURRENT(r13)
-	addi	r4,r4,THREAD		/* Get THREAD */
-	li	r6,1
-	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
-	/* enable use of VSX after return */
-	oris	r12,r12,MSR_VSX@h
-	std	r12,_MSR(r1)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	ld	r4,PACACURRENT(r13)
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	b	fast_exception_return
-#endif /* CONFIG_VSX */
 
 /*
  * Hash table stuff
Index: linux-work/arch/powerpc/kernel/misc_64.S
===================================================================
--- linux-work.orig/arch/powerpc/kernel/misc_64.S	2009-06-02 15:37:39.000000000 +1000
+++ linux-work/arch/powerpc/kernel/misc_64.S	2009-06-02 15:38:31.000000000 +1000
@@ -457,98 +457,6 @@ _GLOBAL(disable_kernel_fp)
 	isync
 	blr
 
-#ifdef CONFIG_ALTIVEC
-
-#if 0 /* this has no callers for now */
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
-	mfmsr	r3
-	rldicl	r0,r3,(63-MSR_VEC_LG),1
-	rldicl	r3,r0,(MSR_VEC_LG+1),0
-	mtmsrd	r3			/* disable use of VMX now */
-	isync
-	blr
-#endif /* 0 */
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
-	mfmsr	r5
-	oris	r5,r5,MSR_VEC@h
-	mtmsrd	r5			/* enable use of VMX now */
-	isync
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	SAVE_32VRS(0,r4,r3)
-	mfvscr	vr0
-	li	r4,THREAD_VSCR
-	stvx	vr0,r4,r3
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	lis	r3,(MSR_VEC|MSR_VSX)@h
-FTR_SECTION_ELSE
-	lis	r3,MSR_VEC@h
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#else
-	lis	r3,MSR_VEC@h
-#endif
-	andc	r4,r4,r3		/* disable FP for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_altivec@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * __giveup_vsx(tsk)
- * Disable VSX for the task given as the argument.
- * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
- */
-_GLOBAL(__giveup_vsx)
-	mfmsr	r5
-	oris	r5,r5,MSR_VSX@h
-	mtmsrd	r5			/* enable use of VSX now */
-	isync
-
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r3,MSR_VSX@h
-	andc	r4,r4,r3		/* disable VSX for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_vsx@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-#endif /* CONFIG_VSX */
-
 /* kexec_wait(phys_cpu)
  *
  * wait for the flag to change, indicating this kernel is going away but
Index: linux-work/arch/powerpc/Makefile
===================================================================
--- linux-work.orig/arch/powerpc/Makefile	2009-06-02 17:17:26.000000000 +1000
+++ linux-work/arch/powerpc/Makefile	2009-06-02 17:17:36.000000000 +1000
@@ -142,6 +142,7 @@ head-$(CONFIG_FSL_BOOKE)	:= arch/powerpc
 
 head-$(CONFIG_PPC64)		+= arch/powerpc/kernel/entry_64.o
 head-$(CONFIG_PPC_FPU)		+= arch/powerpc/kernel/fpu.o
+head-$(CONFIG_ALTIVEC)		+= arch/powerpc/kernel/vector.o
 
 core-y				+= arch/powerpc/kernel/ \
 				   arch/powerpc/mm/ \
Index: linux-work/arch/powerpc/kernel/Makefile
===================================================================
--- linux-work.orig/arch/powerpc/kernel/Makefile	2009-06-02 17:16:14.000000000 +1000
+++ linux-work/arch/powerpc/kernel/Makefile	2009-06-02 17:16:30.000000000 +1000
@@ -36,7 +36,7 @@ obj-$(CONFIG_PPC64)		+= setup_64.o sys_p
 				   firmware.o nvram_64.o
 obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC64)		+= vdso64/
-obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
+obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
 obj-$(CONFIG_PPC_OF)		+= of_device.o of_platform.o prom_parse.o
 obj-$(CONFIG_PPC_CLOCK)		+= clock.o
@@ -108,6 +108,7 @@ obj-y				+= ppc_save_regs.o
 endif
 
 extra-$(CONFIG_PPC_FPU)		+= fpu.o
+extra-$(CONFIG_ALTIVEC)		+= vector.o
 extra-$(CONFIG_PPC64)		+= entry_64.o
 
 extra-y				+= systbl_chk.i


* [PATCH 2/5] powerpc: Split exception handling out of head_64.S
From: Benjamin Herrenschmidt @ 2009-06-02  7:50 UTC (permalink / raw)
  To: linuxppc-dev

To prepare for future support of Book3E 64-bit PowerPC processors,
which use a completely different exception handling scheme, we move
the existing exception handling code to a new exceptions-64s.S file.

This file is #included from head_64.S because of absolute address
requirements that can currently only be satisfied from within that
file.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
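[ Note for readers, not part of the patch: concretely, head_64.S keeps
  the entry point and early init code and pulls the vectors back in
  with a plain #include, so they still assemble in the one translation
  unit that can provide the absolute ". = 0x..." placement.  A sketch,
  based on the description above rather than copied from the diff:

	/* In head_64.S, where the exception vectors used to live: */
	#ifdef CONFIG_PPC_BOOK3S
	#include "exceptions-64s.S"
	#endif
  ]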

 arch/powerpc/kernel/exceptions-64s.S |  978 +++++++++++++++++++++++++++++++++++
 arch/powerpc/kernel/head_64.S        |  977 ----------------------------------
 2 files changed, 995 insertions(+), 960 deletions(-)

--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-work/arch/powerpc/kernel/exceptions-64s.S	2009-06-02 15:47:18.000000000 +1000
@@ -0,0 +1,978 @@
+/*
+ * This file contains the 64-bit "server" PowerPC variant
+ * of the low level exception handling including exception
+ * vectors, exception return, part of the slb and stab
+ * handling and other fixed offset specific things.
+ *
+ * This file is meant to be #included from head_64.S due to
+ * position dependant assembly.
+ *
+ * Most of this originates from head_64.S and thus has the same
+ * copyright history.
+ *
+ */
+
+/*
+ * We layout physical memory as follows:
+ * 0x0000 - 0x00ff : Secondary processor spin code
+ * 0x0100 - 0x2fff : pSeries Interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 -        : Early init and support code
+ */
+
+
+/*
+ *   SPRG Usage
+ *
+ *   Register	Definition
+ *
+ *   SPRG0	reserved for hypervisor
+ *   SPRG1	temp - used to save gpr
+ *   SPRG2	temp - used to save gpr
+ *   SPRG3	virt addr of paca
+ */
+
+/*
+ * This is the start of the interrupt handlers for pSeries
+ * This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
+ */
+	. = 0x100
+	.globl __start_interrupts
+__start_interrupts:
+
+	STD_EXCEPTION_PSERIES(0x100, system_reset)
+
+	. = 0x200
+_machine_check_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+	. = 0x300
+	.globl data_access_pSeries
+data_access_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13
+BEGIN_FTR_SECTION
+	mtspr	SPRN_SPRG2,r12
+	mfspr	r13,SPRN_DAR
+	mfspr	r12,SPRN_DSISR
+	srdi	r13,r13,60
+	rlwimi	r13,r12,16,0x20
+	mfcr	r12
+	cmpwi	r13,0x2c
+	beq	do_stab_bolted_pSeries
+	mtcrf	0x80,r12
+	mfspr	r12,SPRN_SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
+
+	. = 0x380
+	.globl data_access_slb_pSeries
+data_access_slb_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13
+	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r3,SPRN_DAR
+	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	mfcr	r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi	r3,0
+	bge	slb_miss_user_pseries
+#endif /* __DISABLED__ */
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	/*
+	 * We can't just use a direct branch to .slb_miss_realmode
+	 * because the distance from here to there depends on where
+	 * the kernel ends up being put.
+	 */
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
+
+	STD_EXCEPTION_PSERIES(0x400, instruction_access)
+
+	. = 0x480
+	.globl instruction_access_slb_pSeries
+instruction_access_slb_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13
+	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
+	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	mfcr	r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi	r3,0
+	bge	slb_miss_user_pseries
+#endif /* __DISABLED__ */
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	mfspr	r10,SPRN_SPRG1
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
+
+	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+	STD_EXCEPTION_PSERIES(0x600, alignment)
+	STD_EXCEPTION_PSERIES(0x700, program_check)
+	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
+	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
+	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+
+	. = 0xc00
+	.globl	system_call_pSeries
+system_call_pSeries:
+	HMT_MEDIUM
+BEGIN_FTR_SECTION
+	cmpdi	r0,0x1ebe
+	beq-	1f
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+	mr	r9,r13
+	mfspr	r13,SPRN_SPRG3
+	mfspr	r11,SPRN_SRR0
+	ld	r12,PACAKBASE(r13)
+	ld	r10,PACAKMSR(r13)
+	LOAD_HANDLER(r12, system_call_entry)
+	mtspr	SPRN_SRR0,r12
+	mfspr	r12,SPRN_SRR1
+	mtspr	SPRN_SRR1,r10
+	rfid
+	b	.	/* prevent speculative execution */
+
+/* Fast LE/BE switch system call */
+1:	mfspr	r12,SPRN_SRR1
+	xori	r12,r12,MSR_LE
+	mtspr	SPRN_SRR1,r12
+	rfid		/* return to userspace */
+	b	.
+
+	STD_EXCEPTION_PSERIES(0xd00, single_step)
+	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+
+	/* We need to deal with the Altivec unavailable exception
+	 * here which is at 0xf20, thus in the middle of the
+	 * prolog code of the PerformanceMonitor one. A little
+	 * trickery is thus necessary
+	 */
+	. = 0xf00
+	b	performance_monitor_pSeries
+
+	. = 0xf20
+	b	altivec_unavailable_pSeries
+
+	. = 0xf40
+	b	vsx_unavailable_pSeries
+
+#ifdef CONFIG_CBE_RAS
+	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+#endif /* CONFIG_CBE_RAS */
+	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+#ifdef CONFIG_CBE_RAS
+	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+#endif /* CONFIG_CBE_RAS */
+	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+#ifdef CONFIG_CBE_RAS
+	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+#endif /* CONFIG_CBE_RAS */
+
+	. = 0x3000
+
+/*** pSeries interrupt support ***/
+
+	/* moved from 0xf00 */
+	STD_EXCEPTION_PSERIES(., performance_monitor)
+	STD_EXCEPTION_PSERIES(., altivec_unavailable)
+	STD_EXCEPTION_PSERIES(., vsx_unavailable)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+	stb	r10,PACAHARDIRQEN(r13)
+	mtcrf	0x80,r9
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	mfspr	r10,SPRN_SRR1
+	rldicl	r10,r10,48,1		/* clear MSR_EE */
+	rotldi	r10,r10,16
+	mtspr	SPRN_SRR1,r10
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	mfspr	r13,SPRN_SPRG1
+	rfid
+	b	.
+
+	.align	7
+do_stab_bolted_pSeries:
+	mtcrf	0x80,r12
+	mfspr	r12,SPRN_SPRG2
+	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Vectors for the FWNMI option.  Share common code.
+ */
+	.globl system_reset_fwnmi
+      .align 7
+system_reset_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+	.globl machine_check_fwnmi
+      .align 7
+machine_check_fwnmi:
+	HMT_MEDIUM
+	mtspr	SPRN_SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef __DISABLED__
+/*
+ * This is used for when the SLB miss handler has to go virtual,
+ * which doesn't happen for now anymore but will once we re-implement
+ * dynamic VSIDs for shared page tables
+ */
+slb_miss_user_pseries:
+	std	r10,PACA_EXGEN+EX_R10(r13)
+	std	r11,PACA_EXGEN+EX_R11(r13)
+	std	r12,PACA_EXGEN+EX_R12(r13)
+	mfspr	r10,SPRG1
+	ld	r11,PACA_EXSLB+EX_R9(r13)
+	ld	r12,PACA_EXSLB+EX_R3(r13)
+	std	r10,PACA_EXGEN+EX_R13(r13)
+	std	r11,PACA_EXGEN+EX_R9(r13)
+	std	r12,PACA_EXGEN+EX_R3(r13)
+	clrrdi	r12,r13,32
+	mfmsr	r10
+	mfspr	r11,SRR0			/* save SRR0 */
+	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
+	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
+	mtspr	SRR0,r12
+	mfspr	r12,SRR1			/* and SRR1 */
+	mtspr	SRR1,r10
+	rfid
+	b	.				/* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+	.align	7
+	.globl	__end_interrupts
+__end_interrupts:
+
+/*
+ * Code from here down to __end_handlers is invoked from the
+ * exception prologs above.  Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
+ */
+
+/*** Common interrupt handlers ***/
+
+	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
+
+	/*
+	 * Machine check is different because we use a different
+	 * save area: PACA_EXMC instead of PACA_EXGEN.
+	 */
+	.align	7
+	.globl machine_check_common
+machine_check_common:
+	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+	FINISH_NAP
+	DISABLE_INTS
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.machine_check_exception
+	b	.ret_from_except
+
+	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+#ifdef CONFIG_ALTIVEC
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
+#else
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
+#endif
+#ifdef CONFIG_CBE_RAS
+	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
+	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
+	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
+
+	.align	7
+system_call_entry:
+	b	system_call_common
+
+/*
+ * Here we have detected that the kernel stack pointer is bad.
+ * R9 contains the saved CR, r13 points to the paca,
+ * r10 contains the (bad) kernel stack pointer,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
+ */
+bad_stack:
+	ld	r1,PACAEMERGSP(r13)
+	subi	r1,r1,64+INT_FRAME_SIZE
+	std	r9,_CCR(r1)
+	std	r10,GPR1(r1)
+	std	r11,_NIP(r1)
+	std	r12,_MSR(r1)
+	mfspr	r11,SPRN_DAR
+	mfspr	r12,SPRN_DSISR
+	std	r11,_DAR(r1)
+	std	r12,_DSISR(r1)
+	mflr	r10
+	mfctr	r11
+	mfxer	r12
+	std	r10,_LINK(r1)
+	std	r11,_CTR(r1)
+	std	r12,_XER(r1)
+	SAVE_GPR(0,r1)
+	SAVE_GPR(2,r1)
+	SAVE_4GPRS(3,r1)
+	SAVE_2GPRS(7,r1)
+	SAVE_10GPRS(12,r1)
+	SAVE_10GPRS(22,r1)
+	lhz	r12,PACA_TRAP_SAVE(r13)
+	std	r12,_TRAP(r1)
+	addi	r11,r1,INT_FRAME_SIZE
+	std	r11,0(r1)
+	li	r12,0
+	std	r12,0(r11)
+	ld	r2,PACATOC(r13)
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.kernel_bad_stack
+	b	1b
+
+/*
+ * Here r13 points to the paca, r9 contains the saved CR,
+ * SRR0 and SRR1 are saved in r11 and r12,
+ * r9 - r13 are saved in paca->exgen.
+ */
+	.align	7
+	.globl data_access_common
+data_access_common:
+	mfspr	r10,SPRN_DAR
+	std	r10,PACA_EXGEN+EX_DAR(r13)
+	mfspr	r10,SPRN_DSISR
+	stw	r10,PACA_EXGEN+EX_DSISR(r13)
+	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+	ld	r3,PACA_EXGEN+EX_DAR(r13)
+	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
+	li	r5,0x300
+	b	.do_hash_page	 	/* Try to handle as hpte fault */
+
+	.align	7
+	.globl instruction_access_common
+instruction_access_common:
+	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+	ld	r3,_NIP(r1)
+	andis.	r4,r12,0x5820
+	li	r5,0x400
+	b	.do_hash_page		/* Try to handle as hpte fault */
+
+/*
+ * Here is the common SLB miss user that is used when going to virtual
+ * mode for SLB misses, that is currently not used
+ */
+#ifdef __DISABLED__
+	.align	7
+	.globl	slb_miss_user_common
+slb_miss_user_common:
+	mflr	r10
+	std	r3,PACA_EXGEN+EX_DAR(r13)
+	stw	r9,PACA_EXGEN+EX_CCR(r13)
+	std	r10,PACA_EXGEN+EX_LR(r13)
+	std	r11,PACA_EXGEN+EX_SRR0(r13)
+	bl	.slb_allocate_user
+
+	ld	r10,PACA_EXGEN+EX_LR(r13)
+	ld	r3,PACA_EXGEN+EX_R3(r13)
+	lwz	r9,PACA_EXGEN+EX_CCR(r13)
+	ld	r11,PACA_EXGEN+EX_SRR0(r13)
+	mtlr	r10
+	beq-	slb_miss_fault
+
+	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
+	beq-	unrecov_user_slb
+	mfmsr	r10
+
+.machine push
+.machine "power4"
+	mtcrf	0x80,r9
+.machine pop
+
+	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
+	mtmsrd	r10,1
+
+	mtspr	SRR0,r11
+	mtspr	SRR1,r12
+
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	ld	r11,PACA_EXGEN+EX_R11(r13)
+	ld	r12,PACA_EXGEN+EX_R12(r13)
+	ld	r13,PACA_EXGEN+EX_R13(r13)
+	rfid
+	b	.
+
+slb_miss_fault:
+	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+	ld	r4,PACA_EXGEN+EX_DAR(r13)
+	li	r5,0
+	std	r4,_DAR(r1)
+	std	r5,_DSISR(r1)
+	b	handle_page_fault
+
+unrecov_user_slb:
+	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
+	DISABLE_INTS
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+	mflr	r10
+#ifdef CONFIG_RELOCATABLE
+	mtctr	r11
+#endif
+
+	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
+	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
+
+	bl	.slb_allocate_realmode
+
+	/* All done -- return from exception. */
+
+	ld	r10,PACA_EXSLB+EX_LR(r13)
+	ld	r3,PACA_EXSLB+EX_R3(r13)
+	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+	ld	r11,PACALPPACAPTR(r13)
+	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+
+	mtlr	r10
+
+	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
+	beq-	2f
+
+.machine	push
+.machine	"power4"
+	mtcrf	0x80,r9
+	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
+.machine	pop
+
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+	mtspr	SPRN_SRR0,r11
+	mtspr	SPRN_SRR1,r12
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+
+2:
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+	b	unrecov_slb
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+	mfspr	r11,SPRN_SRR0
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10,unrecov_slb)
+	mtspr	SPRN_SRR0,r10
+	ld	r10,PACAKMSR(r13)
+	mtspr	SPRN_SRR1,r10
+	rfid
+	b	.
+
+unrecov_slb:
+	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+	DISABLE_INTS
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
+	.align	7
+	.globl hardware_interrupt_common
+	.globl hardware_interrupt_entry
+hardware_interrupt_common:
+	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+	FINISH_NAP
+hardware_interrupt_entry:
+	DISABLE_INTS
+BEGIN_FTR_SECTION
+	bl	.ppc64_runlatch_on
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_IRQ
+	b	.ret_from_except_lite
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+	andc	r9,r9,r10
+	std	r9,TI_LOCAL_FLAGS(r11)
+	ld	r10,_LINK(r1)		/* make idle task do the */
+	std	r10,_NIP(r1)		/* equivalent of a blr */
+	blr
+#endif
+
+	.align	7
+	.globl alignment_common
+alignment_common:
+	mfspr	r10,SPRN_DAR
+	std	r10,PACA_EXGEN+EX_DAR(r13)
+	mfspr	r10,SPRN_DSISR
+	stw	r10,PACA_EXGEN+EX_DSISR(r13)
+	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+	ld	r3,PACA_EXGEN+EX_DAR(r13)
+	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.alignment_exception
+	b	.ret_from_except
+
+	.align	7
+	.globl program_check_common
+program_check_common:
+	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.program_check_exception
+	b	.ret_from_except
+
+	.align	7
+	.globl fp_unavailable_common
+fp_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+	bne	1f			/* if from user, just load it up */
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.kernel_fp_unavailable_exception
+	BUG_OPCODE
+1:	bl	.load_up_fpu
+	b	fast_exception_return
+
+	.align	7
+	.globl altivec_unavailable_common
+altivec_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+	beq	1f
+	bl	.load_up_altivec
+	b	fast_exception_return
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.altivec_unavailable_exception
+	b	.ret_from_except
+
+	.align	7
+	.globl vsx_unavailable_common
+vsx_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	bne	.load_up_vsx
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.vsx_unavailable_exception
+	b	.ret_from_except
+
+	.align	7
+	.globl	__end_handlers
+__end_handlers:
+
+/*
+ * Return from an exception with minimal checks.
+ * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
+ * If interrupts have been enabled, or anything has been
+ * done that might have changed the scheduling status of
+ * any task or sent any task a signal, you should use
+ * ret_from_except or ret_from_except_lite instead of this.
+ */
+fast_exc_return_irq:			/* restores irq state too */
+	ld	r3,SOFTE(r1)
+	TRACE_AND_RESTORE_IRQ(r3);
+	ld	r12,_MSR(r1)
+	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
+	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
+	b	1f
+
+	.globl	fast_exception_return
+fast_exception_return:
+	ld	r12,_MSR(r1)
+1:	ld	r11,_NIP(r1)
+	andi.	r3,r12,MSR_RI		/* check if RI is set */
+	beq-	unrecov_fer
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	andi.	r3,r12,MSR_PR
+	beq	2f
+	ACCOUNT_CPU_USER_EXIT(r3, r4)
+2:
+#endif
+
+	ld	r3,_CCR(r1)
+	ld	r4,_LINK(r1)
+	ld	r5,_CTR(r1)
+	ld	r6,_XER(r1)
+	mtcr	r3
+	mtlr	r4
+	mtctr	r5
+	mtxer	r6
+	REST_GPR(0, r1)
+	REST_8GPRS(2, r1)
+
+	mfmsr	r10
+	rldicl	r10,r10,48,1		/* clear EE */
+	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
+	mtmsrd	r10,1
+
+	mtspr	SPRN_SRR1,r12
+	mtspr	SPRN_SRR0,r11
+	REST_4GPRS(10, r1)
+	ld	r1,GPR1(r1)
+	rfid
+	b	.	/* prevent speculative execution */
+
+unrecov_fer:
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
+
+/*
+ * Hash table stuff
+ */
+	.align	7
+_STATIC(do_hash_page)
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+
+	andis.	r0,r4,0xa450		/* weird error? */
+	bne-	handle_page_fault	/* if not, try to insert a HPTE */
+BEGIN_FTR_SECTION
+	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
+	bne-	do_ste_alloc		/* If so handle it */
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+
+	/*
+	 * On iSeries, we soft-disable interrupts here, then
+	 * hard-enable interrupts so that the hash_page code can spin on
+	 * the hash_table_lock without problems on a shared processor.
+	 */
+	DISABLE_INTS
+
+	/*
+	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
+	 * and will clobber volatile registers when irq tracing is enabled
+	 * so we need to reload them. It may be possible to be smarter here
+	 * and move the irq tracing elsewhere but let's keep it simple for
+	 * now
+	 */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	ld	r3,_DAR(r1)
+	ld	r4,_DSISR(r1)
+	ld	r5,_TRAP(r1)
+	ld	r12,_MSR(r1)
+	clrrdi	r5,r5,4
+#endif /* CONFIG_TRACE_IRQFLAGS */
+	/*
+	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+	 * accessing a userspace segment (even from the kernel). We assume
+	 * kernel addresses always have the high bit set.
+	 */
+	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
+	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
+	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
+	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
+	ori	r4,r4,1			/* add _PAGE_PRESENT */
+	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
+
+	/*
+	 * r3 contains the faulting address
+	 * r4 contains the required access permissions
+	 * r5 contains the trap number
+	 *
+	 * at return r3 = 0 for success
+	 */
+	bl	.hash_page		/* build HPTE if possible */
+	cmpdi	r3,0			/* see if hash_page succeeded */
+
+BEGIN_FW_FTR_SECTION
+	/*
+	 * If we had interrupts soft-enabled at the point where the
+	 * DSI/ISI occurred, and an interrupt came in during hash_page,
+	 * handle it now.
+	 * We jump to ret_from_except_lite rather than fast_exception_return
+	 * because ret_from_except_lite will check for and handle pending
+	 * interrupts if necessary.
+	 */
+	beq	13f
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+
+BEGIN_FW_FTR_SECTION
+	/*
+	 * Here we have interrupts hard-disabled, so it is sufficient
+	 * to restore paca->{soft,hard}_enable and get out.
+	 */
+	beq	fast_exc_return_irq	/* Return from exception on success */
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
+	/* For a hash failure, we don't bother re-enabling interrupts */
+	ble-	12f
+
+	/*
+	 * hash_page couldn't handle it, set soft interrupt enable back
+	 * to what it was before the trap.  Note that .raw_local_irq_restore
+	 * handles any interrupts pending at this point.
+	 */
+	ld	r3,SOFTE(r1)
+	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
+	bl	.raw_local_irq_restore
+	b	11f
+
+/* Here we have a page fault that hash_page can't handle. */
+handle_page_fault:
+	ENABLE_INTS
+11:	ld	r4,_DAR(r1)
+	ld	r5,_DSISR(r1)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_page_fault
+	cmpdi	r3,0
+	beq+	13f
+	bl	.save_nvgprs
+	mr	r5,r3
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	lwz	r4,_DAR(r1)
+	bl	.bad_page_fault
+	b	.ret_from_except
+
+13:	b	.ret_from_except_lite
+
+/* We have a page fault that hash_page could handle but HV refused
+ * the PTE insertion
+ */
+12:	bl	.save_nvgprs
+	mr	r5,r3
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ld	r4,_DAR(r1)
+	bl	.low_hash_fault
+	b	.ret_from_except
+
+	/* here we have a segment miss */
+do_ste_alloc:
+	bl	.ste_allocate		/* try to insert stab entry */
+	cmpdi	r3,0
+	bne-	handle_page_fault
+	b	fast_exception_return
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r9 - r13 are saved in paca->exslb.
+ * We assume we aren't going to take any exceptions during this procedure.
+ * We assume (DAR >> 60) == 0xc.
+ */
+	.align	7
+_GLOBAL(do_stab_bolted)
+	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
+	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
+
+	/* Hash to the primary group */
+	ld	r10,PACASTABVIRT(r13)
+	mfspr	r11,SPRN_DAR
+	srdi	r11,r11,28
+	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
+
+	/* Calculate VSID */
+	/* This is a kernel address, so protovsid = ESID */
+	ASM_VSID_SCRAMBLE(r11, r9, 256M)
+	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
+
+	/* Search the primary group for a free entry */
+1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
+	andi.	r11,r11,0x80
+	beq	2f
+	addi	r10,r10,16
+	andi.	r11,r10,0x70
+	bne	1b
+
+	/* Stick for only searching the primary group for now.		*/
+	/* At least for now, we use a very simple random castout scheme */
+	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
+	mftb	r11
+	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
+	ori	r11,r11,0x10
+
+	/* r10 currently points to an ste one past the group of interest */
+	/* make it point to the randomly selected entry			*/
+	subi	r10,r10,128
+	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
+
+	isync			/* mark the entry invalid		*/
+	ld	r11,0(r10)
+	rldicl	r11,r11,56,1	/* clear the valid bit */
+	rotldi	r11,r11,8
+	std	r11,0(r10)
+	sync
+
+	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
+	slbie	r11
+
+2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
+	eieio
+
+	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
+	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
+	ori	r11,r11,0x90	/* Turn on valid and kp			*/
+	std	r11,0(r10)	/* Put new entry back into the stab	*/
+
+	sync
+
+	/* All done -- return from exception. */
+	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
+	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
+
+	andi.	r10,r12,MSR_RI
+	beq-	unrecov_slb
+
+	mtcrf	0x80,r9			/* restore CR */
+
+	mfmsr	r10
+	clrrdi	r10,r10,2
+	mtmsrd	r10,1
+
+	mtspr	SPRN_SRR0,r11
+	mtspr	SPRN_SRR1,r12
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on).  The address is given to the hv
+ * as a page number (see xLparMap below), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+	. = STAB0_OFFSET	/* 0x6000 */
+	.globl initial_stab
+initial_stab:
+	.space	4096
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+	.= 0x7000
+	.globl fwnmi_data_area
+fwnmi_data_area:
+#endif /* CONFIG_PPC_PSERIES */
+
+	/* iSeries does not use the FWNMI stuff, so it is safe to put
+	 * this here, even if we later allow kernels that will boot on
+	 * both pSeries and iSeries */
+#ifdef CONFIG_PPC_ISERIES
+        . = LPARMAP_PHYS
+	.globl xLparMap
+xLparMap:
+	.quad	HvEsidsToMap		/* xNumberEsids */
+	.quad	HvRangesToMap		/* xNumberRanges */
+	.quad	STAB0_PAGE		/* xSegmentTableOffs */
+	.zero	40			/* xRsvd */
+	/* xEsids (HvEsidsToMap entries of 2 quads) */
+	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
+	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
+	.quad	VMALLOC_START_ESID	/* xKernelEsid */
+	.quad	VMALLOC_START_VSID	/* xKernelVsid */
+	/* xRanges (HvRangesToMap entries of 3 quads) */
+	.quad	HvPagesToMap		/* xPages */
+	.quad	0			/* xOffset */
+	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
+
+#endif /* CONFIG_PPC_ISERIES */
+
+#ifdef CONFIG_PPC_PSERIES
+        . = 0x8000
+#endif /* CONFIG_PPC_PSERIES */
Index: linux-work/arch/powerpc/kernel/head_64.S
===================================================================
--- linux-work.orig/arch/powerpc/kernel/head_64.S	2009-06-02 15:44:52.000000000 +1000
+++ linux-work/arch/powerpc/kernel/head_64.S	2009-06-02 15:56:33.000000000 +1000
@@ -12,8 +12,9 @@
  *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
  *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
  *
- *  This file contains the low-level support and setup for the
- *  PowerPC-64 platform, including trap and interrupt dispatch.
+ *  This file contains the entry point for the 64-bit kernel along
+ *  with some early initialization code common to all 64-bit powerpc
+ *  variants.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
@@ -38,36 +39,25 @@
 #include <asm/exception.h>
 #include <asm/irqflags.h>
 
-/*
- * We layout physical memory as follows:
- * 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
- * 0x6000 - 0x6fff : Initial (CPU0) segment table
- * 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 -        : Early init and support code
- */
-
-/*
- *   SPRG Usage
- *
- *   Register	Definition
- *
- *   SPRG0	reserved for hypervisor
- *   SPRG1	temp - used to save gpr
- *   SPRG2	temp - used to save gpr
- *   SPRG3	virt addr of paca
+/* The physical memory is layed out such that the secondary processor
+ * spin code sits at 0x0000...0x00ff. On server, the vectors follow
+ * using the layout described in exceptions-64s.S
  */
 
 /*
  * Entering into this code we make the following assumptions:
- *  For pSeries:
+ *
+ *  For pSeries or server processors:
  *   1. The MMU is off & open firmware is running in real mode.
  *   2. The kernel is entered at __start
  *
  *  For iSeries:
  *   1. The MMU is on (as it always is for iSeries)
  *   2. The kernel is entered at system_reset_iSeries
+ *
+ *  For Book3E processors:
+ *   1. The MMU is on running in AS0 in a state defined in ePAPR
+ *   2. The kernel is entered at __start
  */
 
 	.text
@@ -166,947 +156,14 @@ exception_marker:
 	.text
 
 /*
- * This is the start of the interrupt handlers for pSeries
- * This code runs with relocation off.
- * Code from here to __end_interrupts gets copied down to real
- * address 0x100 when we are running a relocatable kernel.
- * Therefore any relative branches in this section must only
- * branch to labels in this section.
- */
-	. = 0x100
-	.globl __start_interrupts
-__start_interrupts:
-
-	STD_EXCEPTION_PSERIES(0x100, system_reset)
-
-	. = 0x200
-_machine_check_pSeries:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-	. = 0x300
-	.globl data_access_pSeries
-data_access_pSeries:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13
-BEGIN_FTR_SECTION
-	mtspr	SPRN_SPRG2,r12
-	mfspr	r13,SPRN_DAR
-	mfspr	r12,SPRN_DSISR
-	srdi	r13,r13,60
-	rlwimi	r13,r12,16,0x20
-	mfcr	r12
-	cmpwi	r13,0x2c
-	beq	do_stab_bolted_pSeries
-	mtcrf	0x80,r12
-	mfspr	r12,SPRN_SPRG2
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
-
-	. = 0x380
-	.globl data_access_slb_pSeries
-data_access_slb_pSeries:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13
-	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
-	std	r3,PACA_EXSLB+EX_R3(r13)
-	mfspr	r3,SPRN_DAR
-	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
-	mfcr	r9
-#ifdef __DISABLED__
-	/* Keep that around for when we re-implement dynamic VSIDs */
-	cmpdi	r3,0
-	bge	slb_miss_user_pseries
-#endif /* __DISABLED__ */
-	std	r10,PACA_EXSLB+EX_R10(r13)
-	std	r11,PACA_EXSLB+EX_R11(r13)
-	std	r12,PACA_EXSLB+EX_R12(r13)
-	mfspr	r10,SPRN_SPRG1
-	std	r10,PACA_EXSLB+EX_R13(r13)
-	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-#ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
-#else
-	/*
-	 * We can't just use a direct branch to .slb_miss_realmode
-	 * because the distance from here to there depends on where
-	 * the kernel ends up being put.
-	 */
-	mfctr	r11
-	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10, .slb_miss_realmode)
-	mtctr	r10
-	bctr
-#endif
-
-	STD_EXCEPTION_PSERIES(0x400, instruction_access)
-
-	. = 0x480
-	.globl instruction_access_slb_pSeries
-instruction_access_slb_pSeries:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13
-	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
-	std	r3,PACA_EXSLB+EX_R3(r13)
-	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
-	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
-	mfcr	r9
-#ifdef __DISABLED__
-	/* Keep that around for when we re-implement dynamic VSIDs */
-	cmpdi	r3,0
-	bge	slb_miss_user_pseries
-#endif /* __DISABLED__ */
-	std	r10,PACA_EXSLB+EX_R10(r13)
-	std	r11,PACA_EXSLB+EX_R11(r13)
-	std	r12,PACA_EXSLB+EX_R12(r13)
-	mfspr	r10,SPRN_SPRG1
-	std	r10,PACA_EXSLB+EX_R13(r13)
-	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-#ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
-#else
-	mfctr	r11
-	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10, .slb_miss_realmode)
-	mtctr	r10
-	bctr
-#endif
-
-	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
-	STD_EXCEPTION_PSERIES(0x600, alignment)
-	STD_EXCEPTION_PSERIES(0x700, program_check)
-	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
-	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
-	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
-	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
-
-	. = 0xc00
-	.globl	system_call_pSeries
-system_call_pSeries:
-	HMT_MEDIUM
-BEGIN_FTR_SECTION
-	cmpdi	r0,0x1ebe
-	beq-	1f
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
-	mr	r9,r13
-	mfspr	r13,SPRN_SPRG3
-	mfspr	r11,SPRN_SRR0
-	ld	r12,PACAKBASE(r13)
-	ld	r10,PACAKMSR(r13)
-	LOAD_HANDLER(r12, system_call_entry)
-	mtspr	SPRN_SRR0,r12
-	mfspr	r12,SPRN_SRR1
-	mtspr	SPRN_SRR1,r10
-	rfid
-	b	.	/* prevent speculative execution */
-
-/* Fast LE/BE switch system call */
-1:	mfspr	r12,SPRN_SRR1
-	xori	r12,r12,MSR_LE
-	mtspr	SPRN_SRR1,r12
-	rfid		/* return to userspace */
-	b	.
-
-	STD_EXCEPTION_PSERIES(0xd00, single_step)
-	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
-
-	/* We need to deal with the Altivec unavailable exception
-	 * here which is at 0xf20, thus in the middle of the
-	 * prolog code of the PerformanceMonitor one. A little
-	 * trickery is thus necessary
-	 */
-	. = 0xf00
-	b	performance_monitor_pSeries
-
-	. = 0xf20
-	b	altivec_unavailable_pSeries
-
-	. = 0xf40
-	b	vsx_unavailable_pSeries
-
-#ifdef CONFIG_CBE_RAS
-	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
-#endif /* CONFIG_CBE_RAS */
-	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
-#ifdef CONFIG_CBE_RAS
-	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
-#endif /* CONFIG_CBE_RAS */
-	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
-#ifdef CONFIG_CBE_RAS
-	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
-#endif /* CONFIG_CBE_RAS */
-
-	. = 0x3000
-
-/*** pSeries interrupt support ***/
-
-	/* moved from 0xf00 */
-	STD_EXCEPTION_PSERIES(., performance_monitor)
-	STD_EXCEPTION_PSERIES(., altivec_unavailable)
-	STD_EXCEPTION_PSERIES(., vsx_unavailable)
-
-/*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
- */
-masked_interrupt:
-	stb	r10,PACAHARDIRQEN(r13)
-	mtcrf	0x80,r9
-	ld	r9,PACA_EXGEN+EX_R9(r13)
-	mfspr	r10,SPRN_SRR1
-	rldicl	r10,r10,48,1		/* clear MSR_EE */
-	rotldi	r10,r10,16
-	mtspr	SPRN_SRR1,r10
-	ld	r10,PACA_EXGEN+EX_R10(r13)
-	mfspr	r13,SPRN_SPRG1
-	rfid
-	b	.
-
-	.align	7
-do_stab_bolted_pSeries:
-	mtcrf	0x80,r12
-	mfspr	r12,SPRN_SPRG2
-	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
-
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option.  Share common code.
- */
-	.globl system_reset_fwnmi
-      .align 7
-system_reset_fwnmi:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-
-	.globl machine_check_fwnmi
-      .align 7
-machine_check_fwnmi:
-	HMT_MEDIUM
-	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-#endif /* CONFIG_PPC_PSERIES */
-
-#ifdef __DISABLED__
-/*
- * This is used for when the SLB miss handler has to go virtual,
- * which doesn't happen for now anymore but will once we re-implement
- * dynamic VSIDs for shared page tables
- */
-slb_miss_user_pseries:
-	std	r10,PACA_EXGEN+EX_R10(r13)
-	std	r11,PACA_EXGEN+EX_R11(r13)
-	std	r12,PACA_EXGEN+EX_R12(r13)
-	mfspr	r10,SPRG1
-	ld	r11,PACA_EXSLB+EX_R9(r13)
-	ld	r12,PACA_EXSLB+EX_R3(r13)
-	std	r10,PACA_EXGEN+EX_R13(r13)
-	std	r11,PACA_EXGEN+EX_R9(r13)
-	std	r12,PACA_EXGEN+EX_R3(r13)
-	clrrdi	r12,r13,32
-	mfmsr	r10
-	mfspr	r11,SRR0			/* save SRR0 */
-	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
-	mtspr	SRR0,r12
-	mfspr	r12,SRR1			/* and SRR1 */
-	mtspr	SRR1,r10
-	rfid
-	b	.				/* prevent spec. execution */
-#endif /* __DISABLED__ */
-
-	.align	7
-	.globl	__end_interrupts
-__end_interrupts:
-
-/*
- * Code from here down to __end_handlers is invoked from the
- * exception prologs above.  Because the prologs assemble the
- * addresses of these handlers using the LOAD_HANDLER macro,
- * which uses an addi instruction, these handlers must be in
- * the first 32k of the kernel image.
- */
-
-/*** Common interrupt handlers ***/
-
-	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
-
-	/*
-	 * Machine check is different because we use a different
-	 * save area: PACA_EXMC instead of PACA_EXGEN.
-	 */
-	.align	7
-	.globl machine_check_common
-machine_check_common:
-	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
-	FINISH_NAP
-	DISABLE_INTS
-	bl	.save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_exception
-	b	.ret_from_except
-
-	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
-	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
-	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
-	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
-#ifdef CONFIG_ALTIVEC
-	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
-#else
-	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
-#endif
-#ifdef CONFIG_CBE_RAS
-	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
-	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
-	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
-#endif /* CONFIG_CBE_RAS */
-
-	.align	7
-system_call_entry:
-	b	system_call_common
-
-/*
- * Here we have detected that the kernel stack pointer is bad.
- * R9 contains the saved CR, r13 points to the paca,
- * r10 contains the (bad) kernel stack pointer,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using an emergency stack, save the registers there,
- * and call kernel_bad_stack(), which panics.
- */
-bad_stack:
-	ld	r1,PACAEMERGSP(r13)
-	subi	r1,r1,64+INT_FRAME_SIZE
-	std	r9,_CCR(r1)
-	std	r10,GPR1(r1)
-	std	r11,_NIP(r1)
-	std	r12,_MSR(r1)
-	mfspr	r11,SPRN_DAR
-	mfspr	r12,SPRN_DSISR
-	std	r11,_DAR(r1)
-	std	r12,_DSISR(r1)
-	mflr	r10
-	mfctr	r11
-	mfxer	r12
-	std	r10,_LINK(r1)
-	std	r11,_CTR(r1)
-	std	r12,_XER(r1)
-	SAVE_GPR(0,r1)
-	SAVE_GPR(2,r1)
-	SAVE_4GPRS(3,r1)
-	SAVE_2GPRS(7,r1)
-	SAVE_10GPRS(12,r1)
-	SAVE_10GPRS(22,r1)
-	lhz	r12,PACA_TRAP_SAVE(r13)
-	std	r12,_TRAP(r1)
-	addi	r11,r1,INT_FRAME_SIZE
-	std	r11,0(r1)
-	li	r12,0
-	std	r12,0(r11)
-	ld	r2,PACATOC(r13)
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_bad_stack
-	b	1b
-
-/*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- */
-	.align	7
-	.globl data_access_common
-data_access_common:
-	mfspr	r10,SPRN_DAR
-	std	r10,PACA_EXGEN+EX_DAR(r13)
-	mfspr	r10,SPRN_DSISR
-	stw	r10,PACA_EXGEN+EX_DSISR(r13)
-	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
-	ld	r3,PACA_EXGEN+EX_DAR(r13)
-	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
-	li	r5,0x300
-	b	.do_hash_page	 	/* Try to handle as hpte fault */
-
-	.align	7
-	.globl instruction_access_common
-instruction_access_common:
-	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
-	ld	r3,_NIP(r1)
-	andis.	r4,r12,0x5820
-	li	r5,0x400
-	b	.do_hash_page		/* Try to handle as hpte fault */
-
-/*
- * Here is the common SLB miss user that is used when going to virtual
- * mode for SLB misses, that is currently not used
- */
-#ifdef __DISABLED__
-	.align	7
-	.globl	slb_miss_user_common
-slb_miss_user_common:
-	mflr	r10
-	std	r3,PACA_EXGEN+EX_DAR(r13)
-	stw	r9,PACA_EXGEN+EX_CCR(r13)
-	std	r10,PACA_EXGEN+EX_LR(r13)
-	std	r11,PACA_EXGEN+EX_SRR0(r13)
-	bl	.slb_allocate_user
-
-	ld	r10,PACA_EXGEN+EX_LR(r13)
-	ld	r3,PACA_EXGEN+EX_R3(r13)
-	lwz	r9,PACA_EXGEN+EX_CCR(r13)
-	ld	r11,PACA_EXGEN+EX_SRR0(r13)
-	mtlr	r10
-	beq-	slb_miss_fault
-
-	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
-	beq-	unrecov_user_slb
-	mfmsr	r10
-
-.machine push
-.machine "power4"
-	mtcrf	0x80,r9
-.machine pop
-
-	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
-	mtmsrd	r10,1
-
-	mtspr	SRR0,r11
-	mtspr	SRR1,r12
-
-	ld	r9,PACA_EXGEN+EX_R9(r13)
-	ld	r10,PACA_EXGEN+EX_R10(r13)
-	ld	r11,PACA_EXGEN+EX_R11(r13)
-	ld	r12,PACA_EXGEN+EX_R12(r13)
-	ld	r13,PACA_EXGEN+EX_R13(r13)
-	rfid
-	b	.
-
-slb_miss_fault:
-	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
-	ld	r4,PACA_EXGEN+EX_DAR(r13)
-	li	r5,0
-	std	r4,_DAR(r1)
-	std	r5,_DSISR(r1)
-	b	handle_page_fault
-
-unrecov_user_slb:
-	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
-	DISABLE_INTS
-	bl	.save_nvgprs
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
-	b	1b
-
-#endif /* __DISABLED__ */
-
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
+ * On server, we include the exception vectors code here as it
+ * relies on absolute addressing which is only possible within
+ * this compilation unit
  */
-_GLOBAL(slb_miss_realmode)
-	mflr	r10
-#ifdef CONFIG_RELOCATABLE
-	mtctr	r11
+#ifdef CONFIG_PPC_BOOK3S
+#include "exceptions-64s.S"
 #endif
 
-	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
-	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
-
-	bl	.slb_allocate_realmode
-
-	/* All done -- return from exception. */
-
-	ld	r10,PACA_EXSLB+EX_LR(r13)
-	ld	r3,PACA_EXSLB+EX_R3(r13)
-	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	ld	r11,PACALPPACAPTR(r13)
-	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
-
-	mtlr	r10
-
-	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
-	beq-	2f
-
-.machine	push
-.machine	"power4"
-	mtcrf	0x80,r9
-	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
-.machine	pop
-
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	mtspr	SPRN_SRR0,r11
-	mtspr	SPRN_SRR1,r12
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
-	ld	r9,PACA_EXSLB+EX_R9(r13)
-	ld	r10,PACA_EXSLB+EX_R10(r13)
-	ld	r11,PACA_EXSLB+EX_R11(r13)
-	ld	r12,PACA_EXSLB+EX_R12(r13)
-	ld	r13,PACA_EXSLB+EX_R13(r13)
-	rfid
-	b	.	/* prevent speculative execution */
-
-2:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	b	unrecov_slb
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
-	mfspr	r11,SPRN_SRR0
-	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10,unrecov_slb)
-	mtspr	SPRN_SRR0,r10
-	ld	r10,PACAKMSR(r13)
-	mtspr	SPRN_SRR1,r10
-	rfid
-	b	.
-
-unrecov_slb:
-	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
-	DISABLE_INTS
-	bl	.save_nvgprs
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
-	b	1b
-
-	.align	7
-	.globl hardware_interrupt_common
-	.globl hardware_interrupt_entry
-hardware_interrupt_common:
-	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
-	FINISH_NAP
-hardware_interrupt_entry:
-	DISABLE_INTS
-BEGIN_FTR_SECTION
-	bl	.ppc64_runlatch_on
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_IRQ
-	b	.ret_from_except_lite
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
-	andc	r9,r9,r10
-	std	r9,TI_LOCAL_FLAGS(r11)
-	ld	r10,_LINK(r1)		/* make idle task do the */
-	std	r10,_NIP(r1)		/* equivalent of a blr */
-	blr
-#endif
-
-	.align	7
-	.globl alignment_common
-alignment_common:
-	mfspr	r10,SPRN_DAR
-	std	r10,PACA_EXGEN+EX_DAR(r13)
-	mfspr	r10,SPRN_DSISR
-	stw	r10,PACA_EXGEN+EX_DSISR(r13)
-	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
-	ld	r3,PACA_EXGEN+EX_DAR(r13)
-	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
-	std	r3,_DAR(r1)
-	std	r4,_DSISR(r1)
-	bl	.save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
-	bl	.alignment_exception
-	b	.ret_from_except
-
-	.align	7
-	.globl program_check_common
-program_check_common:
-	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
-	bl	.save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
-	bl	.program_check_exception
-	b	.ret_from_except
-
-	.align	7
-	.globl fp_unavailable_common
-fp_unavailable_common:
-	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
-	bne	1f			/* if from user, just load it up */
-	bl	.save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
-	bl	.kernel_fp_unavailable_exception
-	BUG_OPCODE
-1:	bl	.load_up_fpu
-	b	fast_exception_return
-
-	.align	7
-	.globl altivec_unavailable_common
-altivec_unavailable_common:
-	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-	beq	1f
-	bl	.load_up_altivec
-	b	fast_exception_return
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif
-	bl	.save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
-	bl	.altivec_unavailable_exception
-	b	.ret_from_except
-
-	.align	7
-	.globl vsx_unavailable_common
-vsx_unavailable_common:
-	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	bne	.load_up_vsx
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	bl	.save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
-	bl	.vsx_unavailable_exception
-	b	.ret_from_except
-
-	.align	7
-	.globl	__end_handlers
-__end_handlers:
-
-/*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
- */
-fast_exc_return_irq:			/* restores irq state too */
-	ld	r3,SOFTE(r1)
-	TRACE_AND_RESTORE_IRQ(r3);
-	ld	r12,_MSR(r1)
-	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
-	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
-	b	1f
-
-	.globl	fast_exception_return
-fast_exception_return:
-	ld	r12,_MSR(r1)
-1:	ld	r11,_NIP(r1)
-	andi.	r3,r12,MSR_RI		/* check if RI is set */
-	beq-	unrecov_fer
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	andi.	r3,r12,MSR_PR
-	beq	2f
-	ACCOUNT_CPU_USER_EXIT(r3, r4)
-2:
-#endif
-
-	ld	r3,_CCR(r1)
-	ld	r4,_LINK(r1)
-	ld	r5,_CTR(r1)
-	ld	r6,_XER(r1)
-	mtcr	r3
-	mtlr	r4
-	mtctr	r5
-	mtxer	r6
-	REST_GPR(0, r1)
-	REST_8GPRS(2, r1)
-
-	mfmsr	r10
-	rldicl	r10,r10,48,1		/* clear EE */
-	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
-	mtmsrd	r10,1
-
-	mtspr	SPRN_SRR1,r12
-	mtspr	SPRN_SRR0,r11
-	REST_4GPRS(10, r1)
-	ld	r1,GPR1(r1)
-	rfid
-	b	.	/* prevent speculative execution */
-
-unrecov_fer:
-	bl	.save_nvgprs
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
-	b	1b
-
-
-/*
- * Hash table stuff
- */
-	.align	7
-_STATIC(do_hash_page)
-	std	r3,_DAR(r1)
-	std	r4,_DSISR(r1)
-
-	andis.	r0,r4,0xa450		/* weird error? */
-	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-BEGIN_FTR_SECTION
-	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
-	bne-	do_ste_alloc		/* If so handle it */
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-
-	/*
-	 * On iSeries, we soft-disable interrupts here, then
-	 * hard-enable interrupts so that the hash_page code can spin on
-	 * the hash_table_lock without problems on a shared processor.
-	 */
-	DISABLE_INTS
-
-	/*
-	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
-	 * and will clobber volatile registers when irq tracing is enabled
-	 * so we need to reload them. It may be possible to be smarter here
-	 * and move the irq tracing elsewhere but let's keep it simple for
-	 * now
-	 */
-#ifdef CONFIG_TRACE_IRQFLAGS
-	ld	r3,_DAR(r1)
-	ld	r4,_DSISR(r1)
-	ld	r5,_TRAP(r1)
-	ld	r12,_MSR(r1)
-	clrrdi	r5,r5,4
-#endif /* CONFIG_TRACE_IRQFLAGS */
-	/*
-	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-	 * accessing a userspace segment (even from the kernel). We assume
-	 * kernel addresses always have the high bit set.
-	 */
-	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
-	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
-	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
-	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
-	ori	r4,r4,1			/* add _PAGE_PRESENT */
-	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
-
-	/*
-	 * r3 contains the faulting address
-	 * r4 contains the required access permissions
-	 * r5 contains the trap number
-	 *
-	 * at return r3 = 0 for success
-	 */
-	bl	.hash_page		/* build HPTE if possible */
-	cmpdi	r3,0			/* see if hash_page succeeded */
-
-BEGIN_FW_FTR_SECTION
-	/*
-	 * If we had interrupts soft-enabled at the point where the
-	 * DSI/ISI occurred, and an interrupt came in during hash_page,
-	 * handle it now.
-	 * We jump to ret_from_except_lite rather than fast_exception_return
-	 * because ret_from_except_lite will check for and handle pending
-	 * interrupts if necessary.
-	 */
-	beq	13f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-
-BEGIN_FW_FTR_SECTION
-	/*
-	 * Here we have interrupts hard-disabled, so it is sufficient
-	 * to restore paca->{soft,hard}_enable and get out.
-	 */
-	beq	fast_exc_return_irq	/* Return from exception on success */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
-
-	/* For a hash failure, we don't bother re-enabling interrupts */
-	ble-	12f
-
-	/*
-	 * hash_page couldn't handle it, set soft interrupt enable back
-	 * to what it was before the trap.  Note that .raw_local_irq_restore
-	 * handles any interrupts pending at this point.
-	 */
-	ld	r3,SOFTE(r1)
-	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
-	bl	.raw_local_irq_restore
-	b	11f
-
-/* Here we have a page fault that hash_page can't handle. */
-handle_page_fault:
-	ENABLE_INTS
-11:	ld	r4,_DAR(r1)
-	ld	r5,_DSISR(r1)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_page_fault
-	cmpdi	r3,0
-	beq+	13f
-	bl	.save_nvgprs
-	mr	r5,r3
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	lwz	r4,_DAR(r1)
-	bl	.bad_page_fault
-	b	.ret_from_except
-
-13:	b	.ret_from_except_lite
-
-/* We have a page fault that hash_page could handle but HV refused
- * the PTE insertion
- */
-12:	bl	.save_nvgprs
-	mr	r5,r3
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ld	r4,_DAR(r1)
-	bl	.low_hash_fault
-	b	.ret_from_except
-
-	/* here we have a segment miss */
-do_ste_alloc:
-	bl	.ste_allocate		/* try to insert stab entry */
-	cmpdi	r3,0
-	bne-	handle_page_fault
-	b	fast_exception_return
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r9 - r13 are saved in paca->exslb.
- * We assume we aren't going to take any exceptions during this procedure.
- * We assume (DAR >> 60) == 0xc.
- */
-	.align	7
-_GLOBAL(do_stab_bolted)
-	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
-	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
-
-	/* Hash to the primary group */
-	ld	r10,PACASTABVIRT(r13)
-	mfspr	r11,SPRN_DAR
-	srdi	r11,r11,28
-	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
-
-	/* Calculate VSID */
-	/* This is a kernel address, so protovsid = ESID */
-	ASM_VSID_SCRAMBLE(r11, r9, 256M)
-	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
-
-	/* Search the primary group for a free entry */
-1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
-	andi.	r11,r11,0x80
-	beq	2f
-	addi	r10,r10,16
-	andi.	r11,r10,0x70
-	bne	1b
-
-	/* Stick for only searching the primary group for now.		*/
-	/* At least for now, we use a very simple random castout scheme */
-	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
-	mftb	r11
-	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
-	ori	r11,r11,0x10
-
-	/* r10 currently points to an ste one past the group of interest */
-	/* make it point to the randomly selected entry			*/
-	subi	r10,r10,128
-	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
-
-	isync			/* mark the entry invalid		*/
-	ld	r11,0(r10)
-	rldicl	r11,r11,56,1	/* clear the valid bit */
-	rotldi	r11,r11,8
-	std	r11,0(r10)
-	sync
-
-	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
-	slbie	r11
-
-2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
-	eieio
-
-	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
-	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
-	ori	r11,r11,0x90	/* Turn on valid and kp			*/
-	std	r11,0(r10)	/* Put new entry back into the stab	*/
-
-	sync
-
-	/* All done -- return from exception. */
-	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
-	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
-
-	andi.	r10,r12,MSR_RI
-	beq-	unrecov_slb
-
-	mtcrf	0x80,r9			/* restore CR */
-
-	mfmsr	r10
-	clrrdi	r10,r10,2
-	mtmsrd	r10,1
-
-	mtspr	SPRN_SRR0,r11
-	mtspr	SPRN_SRR1,r12
-	ld	r9,PACA_EXSLB+EX_R9(r13)
-	ld	r10,PACA_EXSLB+EX_R10(r13)
-	ld	r11,PACA_EXSLB+EX_R11(r13)
-	ld	r12,PACA_EXSLB+EX_R12(r13)
-	ld	r13,PACA_EXSLB+EX_R13(r13)
-	rfid
-	b	.	/* prevent speculative execution */
-
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on).  The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-	. = STAB0_OFFSET	/* 0x6000 */
-	.globl initial_stab
-initial_stab:
-	.space	4096
-
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
-	.= 0x7000
-	.globl fwnmi_data_area
-fwnmi_data_area:
-#endif /* CONFIG_PPC_PSERIES */
-
-	/* iSeries does not use the FWNMI stuff, so it is safe to put
-	 * this here, even if we later allow kernels that will boot on
-	 * both pSeries and iSeries */
-#ifdef CONFIG_PPC_ISERIES
-        . = LPARMAP_PHYS
-	.globl xLparMap
-xLparMap:
-	.quad	HvEsidsToMap		/* xNumberEsids */
-	.quad	HvRangesToMap		/* xNumberRanges */
-	.quad	STAB0_PAGE		/* xSegmentTableOffs */
-	.zero	40			/* xRsvd */
-	/* xEsids (HvEsidsToMap entries of 2 quads) */
-	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
-	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
-	.quad	VMALLOC_START_ESID	/* xKernelEsid */
-	.quad	VMALLOC_START_VSID	/* xKernelVsid */
-	/* xRanges (HvRangesToMap entries of 3 quads) */
-	.quad	HvPagesToMap		/* xPages */
-	.quad	0			/* xOffset */
-	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
-
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_PSERIES
-        . = 0x8000
-#endif /* CONFIG_PPC_PSERIES */
 
 /*
  * On pSeries and most other platforms, secondary processors spin

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 3/5] powerpc: Introduce CONFIG_PPC_BOOK3S
  2009-06-02  7:50 [PATCH 0/5] powerpc: A bit more way paved toward 64-bit Book3E support Benjamin Herrenschmidt
  2009-06-02  7:50 ` [PATCH 1/5] powerpc: Move VMX and VSX asm code to vector.S Benjamin Herrenschmidt
  2009-06-02  7:50 ` [PATCH 2/5] powerpc: Split exception handling out of head_64.S Benjamin Herrenschmidt
@ 2009-06-02  7:50 ` Benjamin Herrenschmidt
  2009-06-02 10:49   ` Arnd Bergmann
  2009-06-02  7:50 ` [PATCH 4/5] powerpc: Separate PACA fields for server CPUs Benjamin Herrenschmidt
  2009-06-02  7:50 ` [PATCH 5/5] powerpc: Shield code specific to 64-bit server processors Benjamin Herrenschmidt
  4 siblings, 1 reply; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-06-02  7:50 UTC (permalink / raw)
  To: linuxppc-dev

This patch introduces a new Kconfig option, CONFIG_PPC_BOOK3S,
that represents processors compliant with the "classic"
(aka "server") variant of the PowerPC architecture.

It replaces CONFIG_6xx on 32-bit (though that symbol is still
defined for compatibility) and encompasses all currently supported
64-bit processors.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

 arch/powerpc/platforms/Kconfig.cputype |   28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

--- linux-work.orig/arch/powerpc/platforms/Kconfig.cputype	2009-06-02 16:29:27.000000000 +1000
+++ linux-work/arch/powerpc/platforms/Kconfig.cputype	2009-06-02 16:55:01.000000000 +1000
@@ -9,7 +9,6 @@ menu "Processor support"
 choice
 	prompt "Processor Type"
 	depends on PPC32
-	default 6xx
 	help
 	  There are five families of 32 bit PowerPC chips supported.
 	  The most common ones are the desktop and server CPUs (601, 603,
@@ -21,24 +20,27 @@ choice
 
 	  If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx.
 
-config 6xx
+config PPC_BOOK3S
 	bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
 	select PPC_FPU
 
 config PPC_85xx
 	bool "Freescale 85xx"
+	depends on PPC32
 	select E500
 	select FSL_SOC
 	select MPC85xx
 
 config PPC_8xx
 	bool "Freescale 8xx"
+	depends on PPC32
 	select FSL_SOC
 	select 8xx
 	select PPC_LIB_RHEAP
 
 config 40x
 	bool "AMCC 40x"
+	depends on PPC32
 	select PPC_DCR_NATIVE
 	select PPC_UDBG_16550
 	select 4xx_SOC
@@ -46,6 +48,7 @@ config 40x
 
 config 44x
 	bool "AMCC 44x"
+	depends on PPC32
 	select PPC_DCR_NATIVE
 	select PPC_UDBG_16550
 	select 4xx_SOC
@@ -54,16 +57,15 @@ config 44x
 
 config E200
 	bool "Freescale e200"
+	depends on PPC32
 
 endchoice
 
-# Until we have a choice of exclusive CPU types on 64-bit, we always
-# use PPC_BOOK3S. On 32-bit, this is equivalent to 6xx which is
-# "classic" MMU
-
 config PPC_BOOK3S
-       def_bool y
-       depends on PPC64 || 6xx
+	default y
+       	depends on PPC64
+	select PPC_FPU
+
 
 config POWER4_ONLY
 	bool "Optimize for POWER4"
@@ -74,6 +76,10 @@ config POWER4_ONLY
 	  The resulting binary will not work on POWER3 or RS64 processors
 	  when compiled with binutils 2.15 or later.
 
+config 6xx
+	def_bool y
+	depends on PPC32 && PPC_BOOK3S
+
 config POWER3
 	bool
 	depends on PPC64 && PPC_BOOK3S
@@ -125,6 +131,7 @@ config BOOKE
 config FSL_BOOKE
 	bool
 	depends on E200 || E500
+	select PPC_BOOK3E_MMU
 	default y
 
 config FSL_EMB_PERFMON
@@ -203,7 +210,7 @@ config SPE
 
 config PPC_STD_MMU
 	bool
-	depends on 6xx || PPC64
+	depends on PPC_BOOK3S
 	default y
 
 config PPC_STD_MMU_32
@@ -219,8 +226,7 @@ config PPC_MMU_NOHASH
 	depends on !PPC_STD_MMU
 
 config PPC_BOOK3E_MMU
-	def_bool y
-	depends on FSL_BOOKE
+       def_bool n
 
 config PPC_MM_SLICES
 	bool

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 4/5] powerpc: Separate PACA fields for server CPUs
  2009-06-02  7:50 [PATCH 0/5] powerpc: A bit more way paved toward 64-bit Book3E support Benjamin Herrenschmidt
                   ` (2 preceding siblings ...)
  2009-06-02  7:50 ` [PATCH 3/5] powerpc: Introduce CONFIG_PPC_BOOK3S Benjamin Herrenschmidt
@ 2009-06-02  7:50 ` Benjamin Herrenschmidt
  2009-06-02  7:50 ` [PATCH 5/5] powerpc: Shield code specific to 64-bit server processors Benjamin Herrenschmidt
  4 siblings, 0 replies; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-06-02  7:50 UTC (permalink / raw)
  To: linuxppc-dev

This patch has no effect other than re-ordering PACA fields on
current server CPUs; however, it is a prerequisite for future
support of BookE 64-bit processors. Various parts of the PACA
struct are now moved under ifdefs, either the new
CONFIG_PPC_BOOK3S or CONFIG_PPC_STD_MMU_64, whichever seems more
appropriate.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

 arch/powerpc/include/asm/paca.h   |   12 +++++++++---
 arch/powerpc/kernel/asm-offsets.c |   32 +++++++++++++++++---------------
 arch/powerpc/kernel/paca.c        |   14 +++++++++++++-
 3 files changed, 39 insertions(+), 19 deletions(-)

--- linux-work.orig/arch/powerpc/include/asm/paca.h	2009-06-02 17:05:07.000000000 +1000
+++ linux-work/arch/powerpc/include/asm/paca.h	2009-06-02 17:05:09.000000000 +1000
@@ -43,6 +43,7 @@ struct task_struct;
  * processor.
  */
 struct paca_struct {
+#ifdef CONFIG_PPC_BOOK3S
 	/*
 	 * Because hw_cpu_id, unlike other paca fields, is accessed
 	 * routinely from other CPUs (from the IRQ code), we stick to
@@ -51,7 +52,7 @@ struct paca_struct {
 	 */
 
 	struct lppaca *lppaca_ptr;	/* Pointer to LpPaca for PLIC */
-
+#endif /* CONFIG_PPC_BOOK3S */
 	/*
 	 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c 
 	 * load lock_token and paca_index with a single lwz
@@ -64,13 +65,16 @@ struct paca_struct {
 	u64 kernel_toc;			/* Kernel TOC address */
 	u64 kernelbase;			/* Base address of kernel */
 	u64 kernel_msr;			/* MSR while running in kernel */
+#ifdef CONFIG_PPC_STD_MMU_64
 	u64 stab_real;			/* Absolute address of segment table */
 	u64 stab_addr;			/* Virtual address of segment table */
+#endif /* CONFIG_PPC_STD_MMU_64 */
 	void *emergency_sp;		/* pointer to emergency stack */
 	u64 data_offset;		/* per cpu data offset */
 	s16 hw_cpu_id;			/* Physical processor number */
 	u8 cpu_start;			/* At startup, processor spins until */
 					/* this becomes non-zero. */
+#ifdef CONFIG_PPC_STD_MMU_64
 	struct slb_shadow *slb_shadow_ptr;
 
 	/*
@@ -81,11 +85,13 @@ struct paca_struct {
 	u64 exmc[10];		/* used for machine checks */
 	u64 exslb[10];		/* used for SLB/segment table misses
  				 * on the linear mapping */
-
-	mm_context_t context;
+	/* SLB related definitions */
 	u16 vmalloc_sllp;
 	u16 slb_cache_ptr;
 	u16 slb_cache[SLB_CACHE_ENTRIES];
+#endif /* CONFIG_PPC_STD_MMU_64 */
+
+	mm_context_t context;
 
 	/*
 	 * then miscellaneous read-write fields
Index: linux-work/arch/powerpc/kernel/asm-offsets.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/asm-offsets.c	2009-06-02 17:05:07.000000000 +1000
+++ linux-work/arch/powerpc/kernel/asm-offsets.c	2009-06-02 17:05:23.000000000 +1000
@@ -122,8 +122,6 @@ int main(void)
 	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
 	DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
 	DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
-	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
-	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
 	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
 	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
 	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
@@ -131,35 +129,30 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
-	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
 					    context.low_slices_psize));
 	DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
 					    context.high_slices_psize));
 	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
+#endif /* CONFIG_PPC_MM_SLICES */
+#ifdef CONFIG_PPC_STD_MMU_64
+	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
+	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
+	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
+	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
+#ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
 #else
 	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
-
 #endif /* CONFIG_PPC_MM_SLICES */
 	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
 	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
 	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
-	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
-	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
-	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
-	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
-	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
-	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
-	DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
-	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-
 	DEFINE(SLBSHADOW_STACKVSID,
 	       offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
 	DEFINE(SLBSHADOW_STACKESID,
@@ -169,6 +162,15 @@ int main(void)
 	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
 	DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
+#endif /* CONFIG_PPC_STD_MMU_64 */
+	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
+	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
+	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+	DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
+	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 #endif /* CONFIG_PPC64 */
 
 	/* RTAS */
Index: linux-work/arch/powerpc/kernel/paca.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/paca.c	2009-06-02 17:05:07.000000000 +1000
+++ linux-work/arch/powerpc/kernel/paca.c	2009-06-02 17:05:09.000000000 +1000
@@ -18,6 +18,8 @@
  * field correctly */
 extern unsigned long __toc_start;
 
+#ifdef CONFIG_PPC_BOOK3S
+
 /*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
@@ -41,6 +43,10 @@ struct lppaca lppaca[] = {
 	},
 };
 
+#endif /* CONFIG_PPC_BOOK3S */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+
 /*
  * 3 persistent SLBs are registered here.  The buffer will be zero
  * initially, hence will all be invaild until we actually write them.
@@ -52,6 +58,8 @@ struct slb_shadow slb_shadow[] __cacheli
 	},
 };
 
+#endif /* CONFIG_PPC_STD_MMU_64 */
+
 /* The Paca is an array with one entry per processor.  Each contains an
  * lppaca, which contains the information shared between the
  * hypervisor and Linux.
@@ -77,15 +85,19 @@ void __init initialise_pacas(void)
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		struct paca_struct *new_paca = &paca[cpu];
 
+#ifdef CONFIG_PPC_BOOK3S
 		new_paca->lppaca_ptr = &lppaca[cpu];
+#endif
 		new_paca->lock_token = 0x8000;
 		new_paca->paca_index = cpu;
 		new_paca->kernel_toc = kernel_toc;
 		new_paca->kernelbase = (unsigned long) _stext;
 		new_paca->kernel_msr = MSR_KERNEL;
 		new_paca->hw_cpu_id = 0xffff;
-		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
 		new_paca->__current = &init_task;
+#ifdef CONFIG_PPC_STD_MMU_64
+		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	}
 }

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 5/5] powerpc: Shield code specific to 64-bit server processors
  2009-06-02  7:50 [PATCH 0/5] powerpc: A bit more way paved toward 64-bit Book3E support Benjamin Herrenschmidt
                   ` (3 preceding siblings ...)
  2009-06-02  7:50 ` [PATCH 4/5] powerpc: Separate PACA fields for server CPUs Benjamin Herrenschmidt
@ 2009-06-02  7:50 ` Benjamin Herrenschmidt
  4 siblings, 0 replies; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-06-02  7:50 UTC (permalink / raw)
  To: linuxppc-dev

This is a random collection of added ifdefs around portions of
code that only make sense on server processors, using either
CONFIG_PPC_STD_MMU_64 or CONFIG_PPC_BOOK3S as seems appropriate.

This is meant to make the future merging of Book3E 64-bit support
easier.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

 arch/powerpc/include/asm/lppaca.h        |    6 ++++++
 arch/powerpc/include/asm/mmu.h           |    4 ++--
 arch/powerpc/include/asm/pgtable-ppc64.h |    5 +++++
 arch/powerpc/kernel/irq.c                |    2 ++
 arch/powerpc/kernel/pci_64.c             |    5 +++++
 arch/powerpc/kernel/process.c            |    2 +-
 arch/powerpc/kernel/prom.c               |    2 +-
 arch/powerpc/kernel/setup_64.c           |    5 ++++-
 arch/powerpc/mm/Makefile                 |    7 ++++---
 arch/powerpc/mm/init_64.c                |    2 ++
 10 files changed, 32 insertions(+), 8 deletions(-)

--- linux-work.orig/arch/powerpc/mm/Makefile	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/mm/Makefile	2009-06-02 16:55:46.000000000 +1000
@@ -11,10 +11,11 @@ obj-y				:= fault.o mem.o pgtable.o gup.
 				   pgtable_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
 				   tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
+obj-$(CONFIG_PPC64)		+= mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o \
-				   mmap_64.o $(hash-y)
+				   mmap_64.o $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				   tlb_hash$(CONFIG_WORD_SIZE).o \
Index: linux-work/arch/powerpc/include/asm/pgtable-ppc64.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pgtable-ppc64.h	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/include/asm/pgtable-ppc64.h	2009-06-02 16:55:46.000000000 +1000
@@ -31,9 +31,11 @@
 #error TASK_SIZE_USER64 exceeds pagetable range
 #endif
 
+#ifdef CONFIG_PPC_STD_MMU_64
 #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
@@ -199,8 +201,11 @@ static inline unsigned long pte_update(s
 	if (!huge)
 		assert_pte_locked(mm, addr);
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (old & _PAGE_HASHPTE)
 		hpte_need_flush(mm, addr, ptep, old, huge);
+#endif
+
 	return old;
 }
 
Index: linux-work/arch/powerpc/include/asm/mmu.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/mmu.h	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/include/asm/mmu.h	2009-06-02 16:55:46.000000000 +1000
@@ -74,10 +74,10 @@ extern void early_init_mmu_secondary(voi
 #endif /* !__ASSEMBLY__ */
 
 
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
 #  include <asm/mmu-hash64.h>
-#elif defined(CONFIG_PPC_STD_MMU)
+#elif defined(CONFIG_PPC_STD_MMU_32)
 /* 32-bit classic hash table MMU */
 #  include <asm/mmu-hash32.h>
 #elif defined(CONFIG_40x)
Index: linux-work/arch/powerpc/kernel/pci_64.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/pci_64.c	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/kernel/pci_64.c	2009-06-02 16:55:46.000000000 +1000
@@ -420,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bu
 	 * so flushing the hash table is the only sane way to make sure
 	 * that no hash entries are covering that removed bridge area
 	 * while still allowing other busses overlapping those pages
+	 *
+	 * Note: If we ever support P2P hotplug on Book3E, we'll have
+	 * to do an appropriate TLB flush here too
 	 */
 	if (bus->self) {
 		struct resource *res = bus->resource[0];
@@ -427,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bu
 		pr_debug("IO unmapping for PCI-PCI bridge %s\n",
 			 pci_name(bus->self));
 
+#ifdef CONFIG_PPC_STD_MMU_64
 		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
 					 res->end + _IO_BASE + 1);
+#endif
 		return 0;
 	}
 
Index: linux-work/arch/powerpc/include/asm/lppaca.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/lppaca.h	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/include/asm/lppaca.h	2009-06-02 16:55:46.000000000 +1000
@@ -20,6 +20,11 @@
 #define _ASM_POWERPC_LPPACA_H
 #ifdef __KERNEL__
 
+/* These definitions relate to hypervisors that only exist when using
+ * a server type processor
+ */
+#ifdef CONFIG_PPC_BOOK3S
+
 //=============================================================================
 //
 //	This control block contains the data that is shared between the
@@ -158,5 +163,6 @@ struct slb_shadow {
 
 extern struct slb_shadow slb_shadow[];
 
+#endif /* CONFIG_PPC_BOOK3S */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
Index: linux-work/arch/powerpc/kernel/irq.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/irq.c	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/kernel/irq.c	2009-06-02 16:55:46.000000000 +1000
@@ -117,6 +117,7 @@ notrace void raw_local_irq_restore(unsig
 	if (!en)
 		return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
 		/*
 		 * Do we need to disable preemption here?  Not really: in the
@@ -134,6 +135,7 @@ notrace void raw_local_irq_restore(unsig
 		if (local_paca->lppaca_ptr->int_dword.any_int)
 			iseries_handle_interrupts();
 	}
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	/*
 	 * if (get_paca()->hard_enabled) return;
Index: linux-work/arch/powerpc/mm/init_64.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/init_64.c	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/mm/init_64.c	2009-06-02 16:55:46.000000000 +1000
@@ -66,6 +66,7 @@
 
 #include "mmu_decl.h"
 
+#ifdef CONFIG_PPC_STD_MMU_64
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -73,6 +74,7 @@
 #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
 phys_addr_t kernstart_addr;
Index: linux-work/arch/powerpc/kernel/process.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/process.c	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/kernel/process.c	2009-06-02 16:55:46.000000000 +1000
@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flag
 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
 				_ALIGN_UP(sizeof(struct thread_info), 16);
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (cpu_has_feature(CPU_FTR_SLB)) {
 		unsigned long sp_vsid;
 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
Index: linux-work/arch/powerpc/kernel/setup_64.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/setup_64.c	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/kernel/setup_64.c	2009-06-02 16:55:46.000000000 +1000
@@ -417,9 +417,11 @@ void __init setup_system(void)
 	if (ppc64_caches.iline_size != 0x80)
 		printk("ppc64_caches.icache_line_size = 0x%x\n",
 		       ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (htab_address)
 		printk("htab_address                  = 0x%p\n", htab_address);
 	printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
+#endif /* CONFIG_PPC_STD_MMU_64 */
 	if (PHYSICAL_START > 0)
 		printk("physical_start                = 0x%lx\n",
 		       PHYSICAL_START);
@@ -511,8 +513,9 @@ void __init setup_arch(char **cmdline_p)
 	irqstack_early_init();
 	emergency_stack_init();
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	stabs_alloc();
-
+#endif
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
 	sparse_init();
Index: linux-work/arch/powerpc/kernel/prom.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/prom.c	2009-06-02 16:53:18.000000000 +1000
+++ linux-work/arch/powerpc/kernel/prom.c	2009-06-02 16:55:46.000000000 +1000
@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features
 		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
 }
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
 static void __init check_cpu_slb_size(unsigned long node)
 {
 	u32 *slb_size_ptr;

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/5] powerpc: Split exception handling out of head_64.S
  2009-06-02  7:50 ` [PATCH 2/5] powerpc: Split exception handling out of head_64.S Benjamin Herrenschmidt
@ 2009-06-02 10:32   ` Benjamin Herrenschmidt
  0 siblings, 0 replies; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-06-02 10:32 UTC (permalink / raw)
  To: linuxppc-dev

On Tue, 2009-06-02 at 17:50 +1000, Benjamin Herrenschmidt wrote:

> +#ifdef CONFIG_PPC_BOOK3S
> +#include "exceptions-64s.S"
>  #endif

Patches are in the wrong order ... the symbol above is only
defined by patch 3/5 :-)

I'll flip them around before merging if there's no other comment.

Cheers,
Ben.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 3/5] powerpc: Introduce CONFIG_PPC_BOOK3S
  2009-06-02  7:50 ` [PATCH 3/5] powerpc: Introduce CONFIG_PPC_BOOK3S Benjamin Herrenschmidt
@ 2009-06-02 10:49   ` Arnd Bergmann
  2009-06-02 11:37     ` Benjamin Herrenschmidt
  2009-06-03  1:54     ` Benjamin Herrenschmidt
  0 siblings, 2 replies; 10+ messages in thread
From: Arnd Bergmann @ 2009-06-02 10:49 UTC (permalink / raw)
  To: linuxppc-dev

On Tuesday 02 June 2009, Benjamin Herrenschmidt wrote:
> --- linux-work.orig/arch/powerpc/platforms/Kconfig.cputype	2009-06-02 16:29:27.000000000 +1000
> +++ linux-work/arch/powerpc/platforms/Kconfig.cputype	2009-06-02 16:55:01.000000000 +1000
> @@ -9,7 +9,6 @@ menu "Processor support"
>  choice
>  	prompt "Processor Type"
>  	depends on PPC32
> -	default 6xx
>  	help
>  	  There are five families of 32 bit PowerPC chips supported.
>  	  The most common ones are the desktop and server CPUs (601, 603,

It looks like you couldn't decide which route to take here. You leave the
'depends on PPC32' above, but

> @@ -21,24 +20,27 @@ choice
>  
>  	  If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx.
>  
> -config 6xx
> +config PPC_BOOK3S
>  	bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
>  	select PPC_FPU
>  
>  config PPC_85xx
>  	bool "Freescale 85xx"
> +	depends on PPC32
>  	select E500
>  	select FSL_SOC
>  	select MPC85xx
>  
>  config PPC_8xx
>  	bool "Freescale 8xx"
> +	depends on PPC32
>  	select FSL_SOC
>  	select 8xx
>  	select PPC_LIB_RHEAP

also add it (redundantly) in all other processor types except BOOK3S, and

> -# Until we have a choice of exclusive CPU types on 64-bit, we always
> -# use PPC_BOOK3S. On 32-bit, this is equivalent to 6xx which is
> -# "classic" MMU
> -
>  config PPC_BOOK3S
> -       def_bool y
> -       depends on PPC64 || 6xx
> +	default y
> +       	depends on PPC64
> +	select PPC_FPU
> +

then add the other BOOK3S option depending on PPC64. Even though
it might look silly to have a choice statement with just one possible
option in case of PPC64, why not integrate it right away, for consistency
reasons. It seems strange to have the same Kconfig symbol both
as a choice and a simple bool.
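
For illustration, a one-option choice along those lines might look
roughly like this hypothetical sketch (not part of the posted patch;
the prompt text here is made up):

choice
	prompt "Processor Type"
	depends on PPC64

config PPC_BOOK3S
	bool "Server processors (classic/Book3S MMU)"
	select PPC_FPU

endchoice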

> @@ -125,6 +131,7 @@ config BOOKE
>  config FSL_BOOKE
>  	bool
>  	depends on E200 || E500
> +	select PPC_BOOK3E_MMU
>  	default y
>  
>  config FSL_EMB_PERFMON
> @@ -203,7 +210,7 @@ config SPE
>  
>  config PPC_STD_MMU
>  	bool
> -	depends on 6xx || PPC64
> +	depends on PPC_BOOK3S
>  	default y
>  
>  config PPC_STD_MMU_32

This also feels inconsistent, using a 'select' in one case and 'depends on' in the
other one. The two ways are obviously equivalent, but I find it a bit confusing
to mix them.
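
For reference, the two forms being compared boil down to roughly the
following (an illustrative fragment trimmed from the hunks above, not
a complete Kconfig file):

# Variant 1: the dependent symbol turns itself on
# (what this patch does for PPC_STD_MMU)
config PPC_STD_MMU
	bool
	depends on PPC_BOOK3S
	default y

# Variant 2: the "parent" symbol forces it on
# (what this patch does for PPC_BOOK3E_MMU via FSL_BOOKE)
config FSL_BOOKE
	bool
	select PPC_BOOK3E_MMU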

	Arnd <><

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 3/5] powerpc: Introduce CONFIG_PPC_BOOK3S
  2009-06-02 10:49   ` Arnd Bergmann
@ 2009-06-02 11:37     ` Benjamin Herrenschmidt
  2009-06-03  1:54     ` Benjamin Herrenschmidt
  1 sibling, 0 replies; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-06-02 11:37 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: linuxppc-dev

On Tue, 2009-06-02 at 11:49 +0100, Arnd Bergmann wrote:
> On Tuesday 02 June 2009, Benjamin Herrenschmidt wrote:
> > --- linux-work.orig/arch/powerpc/platforms/Kconfig.cputype	2009-06-02 16:29:27.000000000 +1000
> > +++ linux-work/arch/powerpc/platforms/Kconfig.cputype	2009-06-02 16:55:01.000000000 +1000
> > @@ -9,7 +9,6 @@ menu "Processor support"
> >  choice
> >  	prompt "Processor Type"
> >  	depends on PPC32
> > -	default 6xx
> >  	help
> >  	  There are five families of 32 bit PowerPC chips supported.
> >  	  The most common ones are the desktop and server CPUs (601, 603,
> 
> It looks like you couldn't decide which route to take here. You leave the
> 'depends on PPC32' above, but

The choice depends on PPC32 since there is no choice .. yet for 64-bit.
I removed the default 6xx because I noticed a warning from Kbuild that
it doesn't like defaults for choices.

> >  config PPC_85xx
> >  	bool "Freescale 85xx"
> > +	depends on PPC32

Ah right, I can remove these. Initially, the choice was available for
both 32 and 64 bit ;-) That's an artifact of the patch splitting since I
only introduce Book3E for 64-bit later.

> also add it (redundantly) in all other processor types except BOOK3S, and

Right. As I said, artifact of the split. I'll remove them for now.

> > -# Until we have a choice of exclusive CPU types on 64-bit, we always
> > -# use PPC_BOOK3S. On 32-bit, this is equivalent to 6xx which is
> > -# "classic" MMU
> > -
> >  config PPC_BOOK3S
> > -       def_bool y
> > -       depends on PPC64 || 6xx
> > +	default y
> > +       	depends on PPC64
> > +	select PPC_FPU
> > +
> 
> then add the other BOOK3S option depending on PPC64. Even though
> it might look silly to have a choice statement with just one possible
> option in case of PPC64, why not integrate it right away, for consistency
> reasons. It seems strange to have the same Kconfig symbol both
> as a choice and a simple bool.

Well, I was hesitating. The initial patch added the choice with E and S
for both 32 and 64 as you can guess. But we aren't ready for that yet.

I suppose I can do a one-option choice in the meantime.

> > @@ -125,6 +131,7 @@ config BOOKE
> >  config FSL_BOOKE
> >  	bool
> >  	depends on E200 || E500
> > +	select PPC_BOOK3E_MMU
> >  	default y
> >  
> >  config FSL_EMB_PERFMON
> > @@ -203,7 +210,7 @@ config SPE
> >  
> >  config PPC_STD_MMU
> >  	bool
> > -	depends on 6xx || PPC64
> > +	depends on PPC_BOOK3S
> >  	default y
> >  
> >  config PPC_STD_MMU_32
> 
> This also feels inconsistent, using a 'select' in one case and 'depends on' in the
> other one. The two ways are obviously equivalent, but I find it a bit confusing
> to mix them.

Right, I should probably use select in both.

Cheers,
Ben.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 3/5] powerpc: Introduce CONFIG_PPC_BOOK3S
  2009-06-02 10:49   ` Arnd Bergmann
  2009-06-02 11:37     ` Benjamin Herrenschmidt
@ 2009-06-03  1:54     ` Benjamin Herrenschmidt
  1 sibling, 0 replies; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-06-03  1:54 UTC (permalink / raw)
  To: Arnd Bergmann; +Cc: linuxppc-dev

On Tue, 2009-06-02 at 11:49 +0100, Arnd Bergmann wrote:
> 
> then add the other BOOK3S option depending on PPC64. Even though
> it might look silly to have a choice statement with just one possible
> option in case of PPC64, why not integrate it right away, for
> consistency
> reasons. It seems strange to have the same Kconfig symbol both
> as a choice and a simple bool.

After some thinking I decided to keep the PPC64 variant as a separate
simple bool for now, among other reasons because it should be a
separate choice anyway due to the help text being different for
32-bit and 64-bit, and I'm not a fan of a one-option choice.

Ben.

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2009-06-03  1:54 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-06-02  7:50 [PATCH 0/5] powerpc: A bit more way paved toward 64-bit Book3E support Benjamin Herrenschmidt
2009-06-02  7:50 ` [PATCH 1/5] powerpc: Move VMX and VSX asm code to vector.S Benjamin Herrenschmidt
2009-06-02  7:50 ` [PATCH 2/5] powerpc: Split exception handling out of head_64.S Benjamin Herrenschmidt
2009-06-02 10:32   ` Benjamin Herrenschmidt
2009-06-02  7:50 ` [PATCH 3/5] powerpc: Introduce CONFIG_PPC_BOOK3S Benjamin Herrenschmidt
2009-06-02 10:49   ` Arnd Bergmann
2009-06-02 11:37     ` Benjamin Herrenschmidt
2009-06-03  1:54     ` Benjamin Herrenschmidt
2009-06-02  7:50 ` [PATCH 4/5] powerpc: Separate PACA fields for server CPUs Benjamin Herrenschmidt
2009-06-02  7:50 ` [PATCH 5/5] powerpc: Shield code specific to 64-bit server processors Benjamin Herrenschmidt
