* [PATCH v2 1/3] [POWERPC] Move to runtime allocated exception stacks @ 2008-05-01 6:29 Kumar Gala 2008-05-01 6:29 ` [PATCH v2 2/3] [POWERPC] Rework EXC_LEVEL_EXCEPTION_PROLOG code Kumar Gala 0 siblings, 1 reply; 8+ messages in thread From: Kumar Gala @ 2008-05-01 6:29 UTC (permalink / raw) To: paulus, benh; +Cc: linuxppc-dev For the additonal exception levels (critical, debug, machine check) on 40x/book-e we were using "static" allocations of the stack in the associated head.S. Move to a runtime allocation to make the code a bit easier to read as we mimic how we handle IRQ stacks. Its also a bit easier to setup the stack with a "dummy" thread_info in C code. Signed-off-by: Kumar Gala <galak@kernel.crashing.org> --- Renamed STACK_TOP to STACK_BASE to make this a bit more clear. - k arch/powerpc/kernel/head_40x.S | 14 ++------------ arch/powerpc/kernel/head_44x.S | 9 --------- arch/powerpc/kernel/head_booke.h | 29 +++++++++++------------------ arch/powerpc/kernel/head_fsl_booke.S | 9 --------- arch/powerpc/kernel/irq.c | 33 +++++++++++++++++++++++++++++++++ arch/powerpc/kernel/setup_32.c | 24 ++++++++++++++++++++++++ include/asm-powerpc/irq.h | 13 +++++++++++++ 7 files changed, 83 insertions(+), 48 deletions(-) diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 8552e67..ca75eaf 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -148,8 +148,8 @@ _ENTRY(crit_r11) mfcr r10; /* save CR in r10 for now */\ mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ andi. r11,r11,MSR_PR; \ - lis r11,critical_stack_top@h; \ - ori r11,r11,critical_stack_top@l; \ + lis r11,critirq_ctx@ha; \ + lwz r11,critirq_ctx@l(r11); \ beq 1f; \ /* COMING FROM USER MODE */ \ mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ @@ -996,16 +996,6 @@ empty_zero_page: swapper_pg_dir: .space PGD_TABLE_SIZE - -/* Stack for handling critical exceptions from kernel mode */ - .section .bss - .align 12 -exception_stack_bottom: - .space 4096 -critical_stack_top: - .globl exception_stack_top -exception_stack_top: - /* Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. */ diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index b84ec6a..2041248 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -730,15 +730,6 @@ empty_zero_page: swapper_pg_dir: .space PGD_TABLE_SIZE -/* Reserved 4k for the critical exception stack & 4k for the machine - * check stack per CPU for kernel mode exceptions */ - .section .bss - .align 12 -exception_stack_bottom: - .space BOOKE_EXCEPTION_STACK_SIZE - .globl exception_stack_top -exception_stack_top: - /* * Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 721faef..9eacf4c 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -43,9 +43,7 @@ SAVE_2GPRS(7, r11) /* To handle the additional exception priority levels on 40x and Book-E - * processors we allocate a 4k stack per additional priority level. The various - * head_xxx.S files allocate space (exception_stack_top) for each priority's - * stack times the number of CPUs + * processors we allocate a stack per additional priority level. 
* * On 40x critical is the only additional level * On 44x/e500 we have critical and machine check @@ -61,36 +59,31 @@ * going to critical or their own debug level we aren't currently * providing configurations that micro-optimize space usage. */ -#ifdef CONFIG_44x -#define NUM_EXCEPTION_LVLS 2 -#else -#define NUM_EXCEPTION_LVLS 3 -#endif -#define BOOKE_EXCEPTION_STACK_SIZE (4096 * NUM_EXCEPTION_LVLS) /* CRIT_SPRG only used in critical exception handling */ #define CRIT_SPRG SPRN_SPRG2 /* MCHECK_SPRG only used in machine check exception handling */ #define MCHECK_SPRG SPRN_SPRG6W -#define MCHECK_STACK_TOP (exception_stack_top - 4096) -#define CRIT_STACK_TOP (exception_stack_top) +#define MCHECK_STACK_BASE mcheckirq_ctx +#define CRIT_STACK_BASE critirq_ctx /* only on e200 for now */ -#define DEBUG_STACK_TOP (exception_stack_top - 8192) +#define DEBUG_STACK_BASE dbgirq_ctx #define DEBUG_SPRG SPRN_SPRG6W #ifdef CONFIG_SMP #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ mfspr r8,SPRN_PIR; \ - mulli r8,r8,BOOKE_EXCEPTION_STACK_SIZE; \ - neg r8,r8; \ - addis r8,r8,level##_STACK_TOP@ha; \ - addi r8,r8,level##_STACK_TOP@l + slwi r8,r8,2; \ + addis r8,r8,level##_STACK_BASE@ha; \ + lwz r8,level##_STACK_BASE@l(r8); \ + addi r8,r8,THREAD_SIZE; #else #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ - lis r8,level##_STACK_TOP@h; \ - ori r8,r8,level##_STACK_TOP@l + lis r8,level##_STACK_BASE@ha; \ + lwz r8,level##_STACK_BASE@l(r8); \ + addi r8,r8,THREAD_SIZE; #endif /* diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index e581524..503f860 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -1080,15 +1080,6 @@ empty_zero_page: swapper_pg_dir: .space PGD_TABLE_SIZE -/* Reserved 4k for the critical exception stack & 4k for the machine - * check stack per CPU for kernel mode exceptions */ - .section .bss - .align 12 -exception_stack_bottom: - .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS - .globl exception_stack_top -exception_stack_top: - /* * Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 2f73f70..b519975 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -356,9 +356,42 @@ void __init init_IRQ(void) { if (ppc_md.init_IRQ) ppc_md.init_IRQ(); + + exc_lvl_ctx_init(); + irq_ctx_init(); } +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; +struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; +struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; + +void exc_lvl_ctx_init(void) +{ + struct thread_info *tp; + int i; + + for_each_possible_cpu(i) { + memset((void *)critirq_ctx[i], 0, THREAD_SIZE); + tp = critirq_ctx[i]; + tp->cpu = i; + tp->preempt_count = 0; + +#ifdef CONFIG_BOOKE + memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE); + tp = dbgirq_ctx[i]; + tp->cpu = i; + tp->preempt_count = 0; + + memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE); + tp = mcheckirq_ctx[i]; + tp->cpu = i; + tp->preempt_count = HARDIRQ_OFFSET; +#endif + } +} +#endif #ifdef CONFIG_IRQSTACKS struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 5112a4a..bef0be3 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -248,6 +248,28 @@ static void __init irqstack_early_init(void) #define irqstack_early_init() #endif +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +static void __init exc_lvl_early_init(void) +{ + unsigned int i; + + /* interrupt stacks must be in lowmem, we get that for free on ppc32 + * as the lmb is limited to lowmem by LMB_REAL_LIMIT */ + for_each_possible_cpu(i) { + critirq_ctx[i] = (struct thread_info *) + __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); +#ifdef CONFIG_BOOKE + dbgirq_ctx[i] = (struct thread_info *) + __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + mcheckirq_ctx[i] = (struct thread_info *) + __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); +#endif + } +} +#else +#define exc_lvl_early_init() +#endif + /* Warning, IO base is not yet inited */ void __init setup_arch(char **cmdline_p) { @@ -305,6 +327,8 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) _edata; init_mm.brk = klimit; + exc_lvl_early_init(); + irqstack_early_init(); /* set up the bootmem stuff with available memory */ diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index 5089deb..1ef8e30 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h @@ -619,6 +619,19 @@ struct pt_regs; #define __ARCH_HAS_DO_SOFTIRQ +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +/* + * Per-cpu stacks for handling critical, debug and machine check + * level interrupts. + */ +extern struct thread_info *critirq_ctx[NR_CPUS]; +extern struct thread_info *dbgirq_ctx[NR_CPUS]; +extern struct thread_info *mcheckirq_ctx[NR_CPUS]; +extern void exc_lvl_ctx_init(void); +#else +#define exc_lvl_ctx_init() +#endif + #ifdef CONFIG_IRQSTACKS /* * Per-cpu stacks for handling hard and soft interrupts. -- 1.5.4.1 ^ permalink raw reply related [flat|nested] 8+ messages in thread
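[Editorial note] The approach the patch takes — one THREAD_SIZE block per CPU with a dummy thread_info at its base, allocated at boot instead of reserved statically in head_*.S — can be pictured with a rough user-space sketch. THREAD_SIZE, the thread_info fields and the allocator below are simplified stand-ins for the kernel definitions, not the real ones:

	/* Rough user-space model of the per-CPU exception stack setup above.
	 * THREAD_SIZE, struct thread_info and the allocator are simplified
	 * stand-ins for the kernel definitions; only the layout idea matters. */
	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	#define THREAD_SIZE	8192		/* assumed stack size/alignment */
	#define NR_CPUS		4

	struct thread_info {			/* dummy thread_info at stack base */
		int cpu;
		int preempt_count;
	};

	static struct thread_info *critirq_ctx[NR_CPUS];

	int main(void)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			/* one THREAD_SIZE-aligned block per CPU (lmb_alloc in the patch) */
			void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);

			memset(stack, 0, THREAD_SIZE);
			critirq_ctx[i] = stack;		/* thread_info lives at the base */
			critirq_ctx[i]->cpu = i;
			critirq_ctx[i]->preempt_count = 0;

			/* the exception prolog starts the stack at base + THREAD_SIZE
			 * and grows it downward toward the thread_info */
			printf("cpu %d: stack top %p\n", i,
			       (void *)((char *)stack + THREAD_SIZE));
		}
		return 0;
	}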
* [PATCH v2 2/3] [POWERPC] Rework EXC_LEVEL_EXCEPTION_PROLOG code 2008-05-01 6:29 [PATCH v2 1/3] [POWERPC] Move to runtime allocated exception stacks Kumar Gala @ 2008-05-01 6:29 ` Kumar Gala 2008-05-01 6:29 ` [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers Kumar Gala 0 siblings, 1 reply; 8+ messages in thread From: Kumar Gala @ 2008-05-01 6:29 UTC (permalink / raw) To: paulus, benh; +Cc: linuxppc-dev * Cleanup the code a bit my allocating an INT_FRAME on our exception stack there by make references go from GPR11-INT_FRAME_SIZE(r8) to just GPR11(r8) * simplify {lvl}_transfer_to_handler code by moving the copying of the temp registers we use if we come from user space into the PROLOG * If the exception came from kernel mode copy thread_info flags, preempt, and task pointer from the process thread_info. Signed-off-by: Kumar Gala <galak@kernel.crashing.org> --- * Fixed commit comment * reworked asm code a bit for possible performance improvement (having a second temp regsiter to load/store from). arch/powerpc/kernel/entry_32.S | 13 --------- arch/powerpc/kernel/head_booke.h | 52 ++++++++++++++++++++++++-------------- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 0c8614d..816dd54 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -44,29 +44,16 @@ #endif #ifdef CONFIG_BOOKE -#include "head_booke.h" -#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \ - mtspr exc_level##_SPRG,r8; \ - BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \ - lwz r0,GPR10-INT_FRAME_SIZE(r8); \ - stw r0,GPR10(r11); \ - lwz r0,GPR11-INT_FRAME_SIZE(r8); \ - stw r0,GPR11(r11); \ - mfspr r8,exc_level##_SPRG - .globl mcheck_transfer_to_handler mcheck_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK) b transfer_to_handler_full .globl debug_transfer_to_handler debug_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG) b transfer_to_handler_full .globl crit_transfer_to_handler crit_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT) /* fall through */ #endif diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 9eacf4c..667c78e 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -78,12 +78,12 @@ slwi r8,r8,2; \ addis r8,r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,THREAD_SIZE; + addi r8,r8,THREAD_SIZE-INT_FRAME_SIZE; #else #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ lis r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,THREAD_SIZE; + addi r8,r8,THREAD_SIZE-INT_FRAME_SIZE; #endif /* @@ -97,22 +97,36 @@ #define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \ mtspr exc_level##_SPRG,r8; \ BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ - stw r10,GPR10-INT_FRAME_SIZE(r8); \ - stw r11,GPR11-INT_FRAME_SIZE(r8); \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,exc_level_srr1; /* check whether user or kernel */\ - andi. r11,r11,MSR_PR; \ - mr r11,r8; \ - mfspr r8,exc_level##_SPRG; \ - beq 1f; \ - /* COMING FROM USER MODE */ \ + stw r9,GPR9(r8); /* save various registers */\ + mfcr r9; /* save CR in r9 for now */\ + stw r10,GPR10(r8); \ + stw r11,GPR11(r8); \ + stw r9,_CCR(r8); /* save CR on stack */\ + mfspr r10,exc_level_srr1; /* check whether user or kernel */\ + andi. 
r10,r10,MSR_PR; \ mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ - addi r11,r11,THREAD_SIZE; \ -1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ + addi r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc exception frm */\ + beq 1f; \ + /* COMING FROM USER MODE */ \ + stw r9,_CCR(r11); /* save CR */\ + lwz r10,GPR10(r8); /* copy regs from exception stack */\ + lwz r9,GPR9(r8); \ + stw r10,GPR10(r11); \ + lwz r10,GPR11(r8); \ stw r9,GPR9(r11); \ + stw r10,GPR11(r11); \ + b 2f; \ + /* COMING FROM PRIV MODE */ \ +1: lwz r9,TI_FLAGS-THREAD_SIZE(r11); \ + lwz r10,TI_PREEMPT-THREAD_SIZE(r11); \ + stw r9,TI_FLAGS-THREAD_SIZE(r8); \ + stw r10,TI_PREEMPT-THREAD_SIZE(r8); \ + lwz r9,TI_TASK-THREAD_SIZE(r11); \ + stw r9,TI_TASK-THREAD_SIZE(r8); \ + mr r11,r8; \ +2: mfspr r8,exc_level##_SPRG; \ + stw r12,GPR12(r11); /* save various registers */\ mflr r10; \ stw r10,_LINK(r11); \ mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ @@ -255,8 +269,8 @@ label: lwz r12,GPR12(r11); \ mtspr DEBUG_SPRG,r8; \ BOOKE_LOAD_EXC_LEVEL_STACK(DEBUG); /* r8 points to the debug stack */ \ - lwz r10,GPR10-INT_FRAME_SIZE(r8); \ - lwz r11,GPR11-INT_FRAME_SIZE(r8); \ + lwz r10,GPR10(r8); \ + lwz r11,GPR11(r8); \ mfspr r8,DEBUG_SPRG; \ \ RFDI; \ @@ -308,8 +322,8 @@ label: lwz r12,GPR12(r11); \ mtspr CRIT_SPRG,r8; \ BOOKE_LOAD_EXC_LEVEL_STACK(CRIT); /* r8 points to the debug stack */ \ - lwz r10,GPR10-INT_FRAME_SIZE(r8); \ - lwz r11,GPR11-INT_FRAME_SIZE(r8); \ + lwz r10,GPR10(r8); \ + lwz r11,GPR11(r8); \ mfspr r8,CRIT_SPRG; \ \ rfci; \ -- 1.5.4.1 ^ permalink raw reply related [flat|nested] 8+ messages in thread
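[Editorial note] The offset change described in this patch (references going from GPR11-INT_FRAME_SIZE(r8) to GPR11(r8)) falls out of where r8 is made to point. A small arithmetic sketch, using made-up values for THREAD_SIZE, INT_FRAME_SIZE and the GPR11 offset, shows that the two forms address the same word:

	/* Sketch only: the numeric values are placeholders, not the real
	 * kernel constants; the point is the pointer arithmetic. */
	#include <stdio.h>

	#define THREAD_SIZE	8192
	#define INT_FRAME_SIZE	192	/* assumed size of one exception frame */
	#define GPR11		60	/* assumed offset of GPR11 within the frame */

	int main(void)
	{
		unsigned long base = 0x10000;			/* exception stack base */

		/* old scheme: r8 at the very top, stores use negative offsets */
		unsigned long r8_old = base + THREAD_SIZE;
		/* new scheme: r8 already backed off by one frame, plain offsets work */
		unsigned long r8_new = base + THREAD_SIZE - INT_FRAME_SIZE;

		printf("old: GPR11-INT_FRAME_SIZE(r8) = %#lx\n",
		       r8_old + GPR11 - INT_FRAME_SIZE);
		printf("new: GPR11(r8)               = %#lx\n", r8_new + GPR11);
		return 0;				/* both print the same address */
	}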
* [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers 2008-05-01 6:29 ` [PATCH v2 2/3] [POWERPC] Rework EXC_LEVEL_EXCEPTION_PROLOG code Kumar Gala @ 2008-05-01 6:29 ` Kumar Gala 0 siblings, 0 replies; 8+ messages in thread From: Kumar Gala @ 2008-05-01 6:29 UTC (permalink / raw) To: paulus, benh; +Cc: linuxppc-dev On machines with more than one exception level any system register that might be modified by the "normal" exception level needs to be saved and restored on taking a higher level exception. We already are saving and restoring ESR and DEAR. For critical level add SRR0/1. For debug level add CSRR0/1 and SRR0/1. For machine check level add DSRR0/1, CSRR0/1, and SRR0/1. On FSL Book-E parts we always save/restore the MAS registers for critical, debug, and machine check level exceptions. On 44x we always save/restore the MMUCR. Signed-off-by: Kumar Gala <galak@kernel.crashing.org> --- * Added saving/restoring of 44x MMUCR. arch/powerpc/kernel/asm-offsets.c | 27 +++++++++++ arch/powerpc/kernel/entry_32.S | 93 ++++++++++++++++++++++++++++++++++++- arch/powerpc/kernel/head_40x.S | 4 ++ arch/powerpc/kernel/head_booke.h | 24 +++++++++- 4 files changed, 144 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index ec9228d..c106f1f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -52,6 +52,15 @@ #include <asm/iseries/alpaca.h> #endif +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +#include "head_booke.h" +#endif + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + int main(void) { DEFINE(THREAD, offsetof(struct task_struct, thread)); @@ -242,6 +251,24 @@ int main(void) DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); #endif /* CONFIG_PPC64 */ +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) + DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); + DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); + /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */ + DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); + DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1)); + DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2)); + DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3)); + DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6)); + DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7)); + DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0)); + DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1)); + DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0)); + DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1)); + DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0)); + DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); +#endif + DEFINE(CLONE_VM, CLONE_VM); DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 816dd54..2cd7ecc 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -46,14 +46,46 @@ #ifdef CONFIG_BOOKE .globl mcheck_transfer_to_handler mcheck_transfer_to_handler: - b transfer_to_handler_full + mfspr r0,SPRN_DSRR0 + stw r0,_DSRR0(r11) + mfspr r0,SPRN_DSRR1 + stw r0,_DSRR1(r11) + /* fall 
through */ .globl debug_transfer_to_handler debug_transfer_to_handler: - b transfer_to_handler_full + mfspr r0,SPRN_CSRR0 + stw r0,_CSRR0(r11) + mfspr r0,SPRN_CSRR1 + stw r0,_CSRR1(r11) + /* fall through */ .globl crit_transfer_to_handler crit_transfer_to_handler: +#ifdef CONFIG_FSL_BOOKE + mfspr r0,SPRN_MAS0 + stw r0,MAS0(r11) + mfspr r0,SPRN_MAS1 + stw r0,MAS1(r11) + mfspr r0,SPRN_MAS2 + stw r0,MAS2(r11) + mfspr r0,SPRN_MAS3 + stw r0,MAS3(r11) + mfspr r0,SPRN_MAS6 + stw r0,MAS6(r11) +#ifdef CONFIG_PHYS_64BIT + mfspr r0,SPRN_MAS7 + stw r0,MAS7(r11) +#endif /* CONFIG_PHYS_64BIT */ +#endif /* CONFIG_FSL_BOOKE */ +#ifdef CONFIG_44x + mfspr r0,SPRN_MMUCR + stw r0,MMUCR(r11) +#endif + mfspr r0,SPRN_SRR0 + stw r0,_SRR0(r11) + mfspr r0,SPRN_SRR1 + stw r0,_SRR1(r11) /* fall through */ #endif @@ -64,6 +96,10 @@ crit_transfer_to_handler: stw r0,GPR10(r11) lwz r0,crit_r11@l(0) stw r0,GPR11(r11) + mfspr r0,SPRN_SRR0 + stw r0,crit_srr0@l(0) + mfspr r0,SPRN_SRR1 + stw r0,crit_srr1@l(0) /* fall through */ #endif @@ -846,17 +882,70 @@ exc_exit_restart_end: exc_lvl_rfi; \ b .; /* prevent prefetch past exc_lvl_rfi */ +#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \ + lwz r9,_##exc_lvl_srr0(r1); \ + lwz r10,_##exc_lvl_srr1(r1); \ + mtspr SPRN_##exc_lvl_srr0,r9; \ + mtspr SPRN_##exc_lvl_srr1,r10; + +#if defined(CONFIG_FSL_BOOKE) +#ifdef CONFIG_PHYS_64BIT +#define RESTORE_MAS7 \ + lwz r11,MAS7(r1); \ + mtspr SPRN_MAS7,r11; +#else +#define RESTORE_MAS7 +#endif /* CONFIG_PHYS_64BIT */ +#define RESTORE_MMU_REGS \ + lwz r9,MAS0(r1); \ + lwz r10,MAS1(r1); \ + lwz r11,MAS2(r1); \ + mtspr SPRN_MAS0,r9; \ + lwz r9,MAS3(r1); \ + mtspr SPRN_MAS1,r10; \ + lwz r10,MAS6(r1); \ + mtspr SPRN_MAS2,r11; \ + mtspr SPRN_MAS3,r9; \ + mtspr SPRN_MAS6,r10; \ + RESTORE_MAS7; +#elif defined(CONFIG_44x) +#define RESTORE_MMU_REGS \ + lwz r9,MMUCR(r1); \ + mtspr SPRN_MMUCR,r9; \ +#else +#define RESTORE_MMU_REGS +#endif + +#ifdef CONFIG_40x .globl ret_from_crit_exc ret_from_crit_exc: + lwz r9,crit_srr0@l(0); + lwz r10,crit_srr1@l(0); + mtspr SPRN_SRR0,r9; + mtspr SPRN_SRR1,r10; RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) +#endif /* CONFIG_40x */ #ifdef CONFIG_BOOKE + .globl ret_from_crit_exc +ret_from_crit_exc: + RESTORE_xSRR(SRR0,SRR1); + RESTORE_MMU_REGS; + RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) + .globl ret_from_debug_exc ret_from_debug_exc: + RESTORE_xSRR(SRR0,SRR1); + RESTORE_xSRR(CSRR0,CSRR1); + RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI) .globl ret_from_mcheck_exc ret_from_mcheck_exc: + RESTORE_xSRR(SRR0,SRR1); + RESTORE_xSRR(CSRR0,CSRR1); + RESTORE_xSRR(DSRR0,DSRR1); + RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI) #endif /* CONFIG_BOOKE */ diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index ca75eaf..3c819a1 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -93,6 +93,10 @@ _ENTRY(crit_r10) .space 4 _ENTRY(crit_r11) .space 4 +_ENTRY(crit_srr0) + .space 4 +_ENTRY(crit_srr1) + .space 4 /* * Exception vector entry code. 
This code runs with address translation diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 667c78e..9c81efc 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -78,12 +78,12 @@ slwi r8,r8,2; \ addis r8,r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,THREAD_SIZE-INT_FRAME_SIZE; + addi r8,r8,THREAD_SIZE-INT_FRAME_SIZE-EXC_LVL_SIZE; #else #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ lis r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,THREAD_SIZE-INT_FRAME_SIZE; + addi r8,r8,THREAD_SIZE-INT_FRAME_SIZE-EXC_LVL_SIZE; #endif /* @@ -374,4 +374,24 @@ label: addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception) +#ifndef __ASSEMBLY__ +/* ensure this structure is always sized to a multiple of the stack alignment */ +struct exception_regs { + unsigned long mas0; + unsigned long mas1; + unsigned long mas2; + unsigned long mas3; + unsigned long mas6; + unsigned long mas7; + unsigned long srr0; + unsigned long srr1; + unsigned long csrr0; + unsigned long csrr1; + unsigned long dsrr0; + unsigned long dsrr1; +}; + +#define STACK_EXC_LVL_FRAME_SIZE (sizeof (struct exception_regs)) + +#endif /* __ASSEMBLY__ */ #endif /* __HEAD_BOOKE_H__ */ -- 1.5.4.1 ^ permalink raw reply related [flat|nested] 8+ messages in thread
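[Editorial note] The asm-offsets.c hunk in this patch exists to export the C layout of struct exception_regs to assembly. Stripped of the kernel build machinery, the values behind those DEFINEs are plain offsetof() results. The following self-contained sketch re-declares the structure locally and omits the STACK_INT_FRAME_SIZE bias the patch adds, so the numbers are only illustrative:

	#include <stdio.h>
	#include <stddef.h>

	/* local re-declaration for the example; the real one is in head_booke.h */
	struct exception_regs {
		unsigned long mas0, mas1, mas2, mas3, mas6, mas7;
		unsigned long srr0, srr1;
		unsigned long csrr0, csrr1;
		unsigned long dsrr0, dsrr1;
	};

	int main(void)
	{
		/* these are the numbers that end up behind _SRR0, _CSRR0, ... in asm,
		 * offset in the patch by STACK_INT_FRAME_SIZE so they index past the
		 * pt_regs frame on the exception stack */
		printf("srr0  at +%zu\n", offsetof(struct exception_regs, srr0));
		printf("csrr0 at +%zu\n", offsetof(struct exception_regs, csrr0));
		printf("size  = %zu bytes\n", sizeof(struct exception_regs));
		return 0;
	}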
* [PATCH v3 1/3] [POWERPC] Move to runtime allocated exception stacks @ 2008-05-16 19:04 Kumar Gala 2008-05-16 19:06 ` [PATCH v3 2/3] [POWERPC] Rework EXC_LEVEL_EXCEPTION_PROLOG code Kumar Gala 0 siblings, 1 reply; 8+ messages in thread From: Kumar Gala @ 2008-05-16 19:04 UTC (permalink / raw) To: Paul Mackerras; +Cc: linuxppc-dev For the additonal exception levels (critical, debug, machine check) on 40x/book-e we were using "static" allocations of the stack in the associated head.S. Move to a runtime allocation to make the code a bit easier to read as we mimic how we handle IRQ stacks. Its also a bit easier to setup the stack with a "dummy" thread_info in C code. Signed-off-by: Kumar Gala <galak@kernel.crashing.org> --- arch/powerpc/kernel/head_40x.S | 14 ++------------ arch/powerpc/kernel/head_44x.S | 9 --------- arch/powerpc/kernel/head_booke.h | 29 +++++++++++------------------ arch/powerpc/kernel/head_fsl_booke.S | 9 --------- arch/powerpc/kernel/irq.c | 33 +++++++++++++++++++++++++++++++++ arch/powerpc/kernel/setup_32.c | 24 ++++++++++++++++++++++++ include/asm-powerpc/irq.h | 13 +++++++++++++ 7 files changed, 83 insertions(+), 48 deletions(-) diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 8552e67..ca75eaf 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -148,8 +148,8 @@ _ENTRY(crit_r11) mfcr r10; /* save CR in r10 for now */\ mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ andi. r11,r11,MSR_PR; \ - lis r11,critical_stack_top@h; \ - ori r11,r11,critical_stack_top@l; \ + lis r11,critirq_ctx@ha; \ + lwz r11,critirq_ctx@l(r11); \ beq 1f; \ /* COMING FROM USER MODE */ \ mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ @@ -996,16 +996,6 @@ empty_zero_page: swapper_pg_dir: .space PGD_TABLE_SIZE - -/* Stack for handling critical exceptions from kernel mode */ - .section .bss - .align 12 -exception_stack_bottom: - .space 4096 -critical_stack_top: - .globl exception_stack_top -exception_stack_top: - /* Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. */ diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index c2b9dc4..47ea8af 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -737,15 +737,6 @@ empty_zero_page: swapper_pg_dir: .space PGD_TABLE_SIZE -/* Reserved 4k for the critical exception stack & 4k for the machine - * check stack per CPU for kernel mode exceptions */ - .section .bss - .align 12 -exception_stack_bottom: - .space BOOKE_EXCEPTION_STACK_SIZE - .globl exception_stack_top -exception_stack_top: - /* * Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 721faef..9eacf4c 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -43,9 +43,7 @@ SAVE_2GPRS(7, r11) /* To handle the additional exception priority levels on 40x and Book-E - * processors we allocate a 4k stack per additional priority level. The various - * head_xxx.S files allocate space (exception_stack_top) for each priority's - * stack times the number of CPUs + * processors we allocate a stack per additional priority level. 
* * On 40x critical is the only additional level * On 44x/e500 we have critical and machine check @@ -61,36 +59,31 @@ * going to critical or their own debug level we aren't currently * providing configurations that micro-optimize space usage. */ -#ifdef CONFIG_44x -#define NUM_EXCEPTION_LVLS 2 -#else -#define NUM_EXCEPTION_LVLS 3 -#endif -#define BOOKE_EXCEPTION_STACK_SIZE (4096 * NUM_EXCEPTION_LVLS) /* CRIT_SPRG only used in critical exception handling */ #define CRIT_SPRG SPRN_SPRG2 /* MCHECK_SPRG only used in machine check exception handling */ #define MCHECK_SPRG SPRN_SPRG6W -#define MCHECK_STACK_TOP (exception_stack_top - 4096) -#define CRIT_STACK_TOP (exception_stack_top) +#define MCHECK_STACK_BASE mcheckirq_ctx +#define CRIT_STACK_BASE critirq_ctx /* only on e200 for now */ -#define DEBUG_STACK_TOP (exception_stack_top - 8192) +#define DEBUG_STACK_BASE dbgirq_ctx #define DEBUG_SPRG SPRN_SPRG6W #ifdef CONFIG_SMP #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ mfspr r8,SPRN_PIR; \ - mulli r8,r8,BOOKE_EXCEPTION_STACK_SIZE; \ - neg r8,r8; \ - addis r8,r8,level##_STACK_TOP@ha; \ - addi r8,r8,level##_STACK_TOP@l + slwi r8,r8,2; \ + addis r8,r8,level##_STACK_BASE@ha; \ + lwz r8,level##_STACK_BASE@l(r8); \ + addi r8,r8,THREAD_SIZE; #else #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ - lis r8,level##_STACK_TOP@h; \ - ori r8,r8,level##_STACK_TOP@l + lis r8,level##_STACK_BASE@ha; \ + lwz r8,level##_STACK_BASE@l(r8); \ + addi r8,r8,THREAD_SIZE; #endif /* diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index e581524..503f860 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -1080,15 +1080,6 @@ empty_zero_page: swapper_pg_dir: .space PGD_TABLE_SIZE -/* Reserved 4k for the critical exception stack & 4k for the machine - * check stack per CPU for kernel mode exceptions */ - .section .bss - .align 12 -exception_stack_bottom: - .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS - .globl exception_stack_top -exception_stack_top: - /* * Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 2f73f70..b519975 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -356,9 +356,42 @@ void __init init_IRQ(void) { if (ppc_md.init_IRQ) ppc_md.init_IRQ(); + + exc_lvl_ctx_init(); + irq_ctx_init(); } +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; +struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; +struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; + +void exc_lvl_ctx_init(void) +{ + struct thread_info *tp; + int i; + + for_each_possible_cpu(i) { + memset((void *)critirq_ctx[i], 0, THREAD_SIZE); + tp = critirq_ctx[i]; + tp->cpu = i; + tp->preempt_count = 0; + +#ifdef CONFIG_BOOKE + memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE); + tp = dbgirq_ctx[i]; + tp->cpu = i; + tp->preempt_count = 0; + + memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE); + tp = mcheckirq_ctx[i]; + tp->cpu = i; + tp->preempt_count = HARDIRQ_OFFSET; +#endif + } +} +#endif #ifdef CONFIG_IRQSTACKS struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 5112a4a..bef0be3 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -248,6 +248,28 @@ static void __init irqstack_early_init(void) #define irqstack_early_init() #endif +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +static void __init exc_lvl_early_init(void) +{ + unsigned int i; + + /* interrupt stacks must be in lowmem, we get that for free on ppc32 + * as the lmb is limited to lowmem by LMB_REAL_LIMIT */ + for_each_possible_cpu(i) { + critirq_ctx[i] = (struct thread_info *) + __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); +#ifdef CONFIG_BOOKE + dbgirq_ctx[i] = (struct thread_info *) + __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + mcheckirq_ctx[i] = (struct thread_info *) + __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); +#endif + } +} +#else +#define exc_lvl_early_init() +#endif + /* Warning, IO base is not yet inited */ void __init setup_arch(char **cmdline_p) { @@ -305,6 +327,8 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) _edata; init_mm.brk = klimit; + exc_lvl_early_init(); + irqstack_early_init(); /* set up the bootmem stuff with available memory */ diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index 5089deb..1ef8e30 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h @@ -619,6 +619,19 @@ struct pt_regs; #define __ARCH_HAS_DO_SOFTIRQ +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +/* + * Per-cpu stacks for handling critical, debug and machine check + * level interrupts. + */ +extern struct thread_info *critirq_ctx[NR_CPUS]; +extern struct thread_info *dbgirq_ctx[NR_CPUS]; +extern struct thread_info *mcheckirq_ctx[NR_CPUS]; +extern void exc_lvl_ctx_init(void); +#else +#define exc_lvl_ctx_init() +#endif + #ifdef CONFIG_IRQSTACKS /* * Per-cpu stacks for handling hard and soft interrupts. -- 1.5.4.5 ^ permalink raw reply related [flat|nested] 8+ messages in thread
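[Editorial note] On SMP, the reworked BOOKE_LOAD_EXC_LEVEL_STACK amounts to an array lookup: index the per-CPU context array by the processor id from PIR (scaled by the pointer size, hence the slwi by 2 on 32-bit), then step to the top of that CPU's stack. A compilable C model of the same computation follows; critirq_ctx and THREAD_SIZE mirror the patch (with an assumed THREAD_SIZE value), everything else is scaffolding invented for the example:

	#include <stdio.h>

	#define THREAD_SIZE	8192	/* assumed value for the sketch */
	#define NR_CPUS		4

	static char stacks[NR_CPUS][THREAD_SIZE];
	static void *critirq_ctx[NR_CPUS] = {
		stacks[0], stacks[1], stacks[2], stacks[3]
	};

	static void *load_exc_level_stack(unsigned int pir)
	{
		/* slwi r8,r8,2 + lwz: scale the CPU id by sizeof(pointer) and
		 * load the per-CPU stack base pointer */
		void *base = critirq_ctx[pir];

		/* addi r8,r8,THREAD_SIZE: point at the top of this CPU's stack */
		return (char *)base + THREAD_SIZE;
	}

	int main(void)
	{
		printf("cpu 2 critical stack top: %p\n", load_exc_level_stack(2));
		return 0;
	}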
* [PATCH v3 2/3] [POWERPC] Rework EXC_LEVEL_EXCEPTION_PROLOG code 2008-05-16 19:04 [PATCH v3 1/3] [POWERPC] Move to runtime allocated exception stacks Kumar Gala @ 2008-05-16 19:06 ` Kumar Gala 2008-05-16 19:08 ` [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers Kumar Gala 0 siblings, 1 reply; 8+ messages in thread From: Kumar Gala @ 2008-05-16 19:06 UTC (permalink / raw) To: Paul Mackerras; +Cc: linuxppc-dev * Cleanup the code a bit my allocating an INT_FRAME on our exception stack there by make references go from GPR11-INT_FRAME_SIZE(r8) to just GPR11(r8) * simplify {lvl}_transfer_to_handler code by moving the copying of the temp registers we use if we come from user space into the PROLOG * If the exception came from kernel mode copy thread_info flags, preempt, and task pointer from the process thread_info. Signed-off-by: Kumar Gala <galak@kernel.crashing.org> --- * fix copying of thread_info fields onto proper stack - k arch/powerpc/kernel/entry_32.S | 13 --------- arch/powerpc/kernel/head_booke.h | 54 ++++++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3a05e9f..13aea53 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -44,29 +44,16 @@ #endif #ifdef CONFIG_BOOKE -#include "head_booke.h" -#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \ - mtspr exc_level##_SPRG,r8; \ - BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \ - lwz r0,GPR10-INT_FRAME_SIZE(r8); \ - stw r0,GPR10(r11); \ - lwz r0,GPR11-INT_FRAME_SIZE(r8); \ - stw r0,GPR11(r11); \ - mfspr r8,exc_level##_SPRG - .globl mcheck_transfer_to_handler mcheck_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK) b transfer_to_handler_full .globl debug_transfer_to_handler debug_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG) b transfer_to_handler_full .globl crit_transfer_to_handler crit_transfer_to_handler: - TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT) /* fall through */ #endif diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 9eacf4c..b0874d2 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -72,18 +72,20 @@ #define DEBUG_STACK_BASE dbgirq_ctx #define DEBUG_SPRG SPRN_SPRG6W +#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE) + #ifdef CONFIG_SMP #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ mfspr r8,SPRN_PIR; \ slwi r8,r8,2; \ addis r8,r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,THREAD_SIZE; + addi r8,r8,EXC_LVL_FRAME_OVERHEAD; #else #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ lis r8,level##_STACK_BASE@ha; \ lwz r8,level##_STACK_BASE@l(r8); \ - addi r8,r8,THREAD_SIZE; + addi r8,r8,EXC_LVL_FRAME_OVERHEAD; #endif /* @@ -97,22 +99,36 @@ #define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \ mtspr exc_level##_SPRG,r8; \ BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ - stw r10,GPR10-INT_FRAME_SIZE(r8); \ - stw r11,GPR11-INT_FRAME_SIZE(r8); \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,exc_level_srr1; /* check whether user or kernel */\ - andi. 
r11,r11,MSR_PR; \ - mr r11,r8; \ - mfspr r8,exc_level##_SPRG; \ - beq 1f; \ - /* COMING FROM USER MODE */ \ + stw r9,GPR9(r8); /* save various registers */\ + mfcr r9; /* save CR in r9 for now */\ + stw r10,GPR10(r8); \ + stw r11,GPR11(r8); \ + stw r9,_CCR(r8); /* save CR on stack */\ + mfspr r10,exc_level_srr1; /* check whether user or kernel */\ + andi. r10,r10,MSR_PR; \ mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ - addi r11,r11,THREAD_SIZE; \ -1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ + addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ + beq 1f; \ + /* COMING FROM USER MODE */ \ + stw r9,_CCR(r11); /* save CR */\ + lwz r10,GPR10(r8); /* copy regs from exception stack */\ + lwz r9,GPR9(r8); \ + stw r10,GPR10(r11); \ + lwz r10,GPR11(r8); \ stw r9,GPR9(r11); \ + stw r10,GPR11(r11); \ + b 2f; \ + /* COMING FROM PRIV MODE */ \ +1: lwz r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r11); \ + lwz r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r11); \ + stw r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r8); \ + stw r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r8); \ + lwz r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r11); \ + stw r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r8); \ + mr r11,r8; \ +2: mfspr r8,exc_level##_SPRG; \ + stw r12,GPR12(r11); /* save various registers */\ mflr r10; \ stw r10,_LINK(r11); \ mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ @@ -255,8 +271,8 @@ label: lwz r12,GPR12(r11); \ mtspr DEBUG_SPRG,r8; \ BOOKE_LOAD_EXC_LEVEL_STACK(DEBUG); /* r8 points to the debug stack */ \ - lwz r10,GPR10-INT_FRAME_SIZE(r8); \ - lwz r11,GPR11-INT_FRAME_SIZE(r8); \ + lwz r10,GPR10(r8); \ + lwz r11,GPR11(r8); \ mfspr r8,DEBUG_SPRG; \ \ RFDI; \ @@ -308,8 +324,8 @@ label: lwz r12,GPR12(r11); \ mtspr CRIT_SPRG,r8; \ BOOKE_LOAD_EXC_LEVEL_STACK(CRIT); /* r8 points to the debug stack */ \ - lwz r10,GPR10-INT_FRAME_SIZE(r8); \ - lwz r11,GPR11-INT_FRAME_SIZE(r8); \ + lwz r10,GPR10(r8); \ + lwz r11,GPR11(r8); \ mfspr r8,CRIT_SPRG; \ \ rfci; \ -- 1.5.4.5 ^ permalink raw reply related [flat|nested] 8+ messages in thread
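[Editorial note] One subtlety behind the v3 changelog line "fix copying of thread_info fields onto proper stack": because r8/r11 now point EXC_LVL_FRAME_OVERHEAD (not THREAD_SIZE) above the stack base, the thread_info copies must subtract that same constant to land back on the thread_info. A sketch with placeholder numbers makes the difference from the v2 offsets visible:

	/* Placeholder numbers; only the arithmetic is the point. */
	#include <stdio.h>

	#define THREAD_SIZE		8192
	#define INT_FRAME_SIZE		192
	#define EXC_LVL_FRAME_OVERHEAD	(THREAD_SIZE - INT_FRAME_SIZE)
	#define TI_FLAGS		8	/* assumed offset inside thread_info */

	int main(void)
	{
		unsigned long ti_base = 0x10000;			/* thread_info */
		unsigned long r11 = ti_base + EXC_LVL_FRAME_OVERHEAD;	/* prolog value */

		/* v2 used TI_FLAGS-THREAD_SIZE(r11): lands INT_FRAME_SIZE too low */
		printf("v2 address: %#lx\n", r11 + TI_FLAGS - THREAD_SIZE);
		/* v3 uses TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r11): back at ti_base */
		printf("v3 address: %#lx (thread_info + TI_FLAGS = %#lx)\n",
		       r11 + TI_FLAGS - EXC_LVL_FRAME_OVERHEAD, ti_base + TI_FLAGS);
		return 0;
	}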
* [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers 2008-05-16 19:06 ` [PATCH v3 2/3] [POWERPC] Rework EXC_LEVEL_EXCEPTION_PROLOG code Kumar Gala @ 2008-05-16 19:08 ` Kumar Gala 2008-05-17 2:55 ` Stephen Rothwell 2008-05-20 2:04 ` Josh Boyer 0 siblings, 2 replies; 8+ messages in thread From: Kumar Gala @ 2008-05-16 19:08 UTC (permalink / raw) To: Paul Mackerras; +Cc: linuxppc-dev On machines with more than one exception level any system register that might be modified by the "normal" exception level needs to be saved and restored on taking a higher level exception. We already are saving and restoring ESR and DEAR. For critical level add SRR0/1. For debug level add CSRR0/1 and SRR0/1. For machine check level add DSRR0/1, CSRR0/1, and SRR0/1. On FSL Book-E parts we always save/restore the MAS registers for critical, debug, and machine check level exceptions. On 44x we always save/restore the MMUCR. Additionally, we save and restore the ksp_limit since we have to adjust it for each exception level. Signed-off-by: Kumar Gala <galak@kernel.crashing.org> --- * Added the ksp_limit save/restore and setting * propogate preempt_count between the normal exception stack and debug stack this is needed for kprobes to work properly. arch/powerpc/kernel/asm-offsets.c | 28 +++++++++ arch/powerpc/kernel/entry_32.S | 121 ++++++++++++++++++++++++++++++++++++- arch/powerpc/kernel/head_40x.S | 6 ++ arch/powerpc/kernel/head_booke.h | 23 +++++++- 4 files changed, 175 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index ec9228d..a504273 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -52,6 +52,15 @@ #include <asm/iseries/alpaca.h> #endif +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) +#include "head_booke.h" +#endif + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + int main(void) { DEFINE(THREAD, offsetof(struct task_struct, thread)); @@ -242,6 +251,25 @@ int main(void) DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); #endif /* CONFIG_PPC64 */ +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) + DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); + DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); + /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */ + DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); + DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1)); + DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2)); + DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3)); + DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6)); + DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7)); + DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0)); + DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1)); + DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0)); + DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1)); + DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0)); + DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); + DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit)); +#endif + DEFINE(CLONE_VM, CLONE_VM); DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); diff --git 
a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 13aea53..24e32fb 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -46,14 +46,52 @@ #ifdef CONFIG_BOOKE .globl mcheck_transfer_to_handler mcheck_transfer_to_handler: - b transfer_to_handler_full + mfspr r0,SPRN_DSRR0 + stw r0,_DSRR0(r11) + mfspr r0,SPRN_DSRR1 + stw r0,_DSRR1(r11) + /* fall through */ .globl debug_transfer_to_handler debug_transfer_to_handler: - b transfer_to_handler_full + mfspr r0,SPRN_CSRR0 + stw r0,_CSRR0(r11) + mfspr r0,SPRN_CSRR1 + stw r0,_CSRR1(r11) + /* fall through */ .globl crit_transfer_to_handler crit_transfer_to_handler: +#ifdef CONFIG_FSL_BOOKE + mfspr r0,SPRN_MAS0 + stw r0,MAS0(r11) + mfspr r0,SPRN_MAS1 + stw r0,MAS1(r11) + mfspr r0,SPRN_MAS2 + stw r0,MAS2(r11) + mfspr r0,SPRN_MAS3 + stw r0,MAS3(r11) + mfspr r0,SPRN_MAS6 + stw r0,MAS6(r11) +#ifdef CONFIG_PHYS_64BIT + mfspr r0,SPRN_MAS7 + stw r0,MAS7(r11) +#endif /* CONFIG_PHYS_64BIT */ +#endif /* CONFIG_FSL_BOOKE */ +#ifdef CONFIG_44x + mfspr r0,SPRN_MMUCR + stw r0,MMUCR(r11) +#endif + mfspr r0,SPRN_SRR0 + stw r0,_SRR0(r11) + mfspr r0,SPRN_SRR1 + stw r0,_SRR1(r11) + + mfspr r8,SPRN_SPRG3 + lwz r0,KSP_LIMIT(r8) + stw r0,SAVED_KSP_LIMIT(r11) + rlwimi r0,r1,0,0,(31-THREAD_SHIFT) + stw r0,KSP_LIMIT(r8) /* fall through */ #endif @@ -64,6 +102,16 @@ crit_transfer_to_handler: stw r0,GPR10(r11) lwz r0,crit_r11@l(0) stw r0,GPR11(r11) + mfspr r0,SPRN_SRR0 + stw r0,crit_srr0@l(0) + mfspr r0,SPRN_SRR1 + stw r0,crit_srr1@l(0) + + mfspr r8,SPRN_SPRG3 + lwz r0,KSP_LIMIT(r8) + stw r0,saved_ksp_limit@l(0) + rlwimi r0,r1,0,0,(31-THREAD_SHIFT) + stw r0,KSP_LIMIT(r8) /* fall through */ #endif @@ -846,17 +894,86 @@ exc_exit_restart_end: exc_lvl_rfi; \ b .; /* prevent prefetch past exc_lvl_rfi */ +#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \ + lwz r9,_##exc_lvl_srr0(r1); \ + lwz r10,_##exc_lvl_srr1(r1); \ + mtspr SPRN_##exc_lvl_srr0,r9; \ + mtspr SPRN_##exc_lvl_srr1,r10; + +#if defined(CONFIG_FSL_BOOKE) +#ifdef CONFIG_PHYS_64BIT +#define RESTORE_MAS7 \ + lwz r11,MAS7(r1); \ + mtspr SPRN_MAS7,r11; +#else +#define RESTORE_MAS7 +#endif /* CONFIG_PHYS_64BIT */ +#define RESTORE_MMU_REGS \ + lwz r9,MAS0(r1); \ + lwz r10,MAS1(r1); \ + lwz r11,MAS2(r1); \ + mtspr SPRN_MAS0,r9; \ + lwz r9,MAS3(r1); \ + mtspr SPRN_MAS1,r10; \ + lwz r10,MAS6(r1); \ + mtspr SPRN_MAS2,r11; \ + mtspr SPRN_MAS3,r9; \ + mtspr SPRN_MAS6,r10; \ + RESTORE_MAS7; +#elif defined(CONFIG_44x) +#define RESTORE_MMU_REGS \ + lwz r9,MMUCR(r1); \ + mtspr SPRN_MMUCR,r9; \ +#else +#define RESTORE_MMU_REGS +#endif + +#ifdef CONFIG_40x .globl ret_from_crit_exc ret_from_crit_exc: + mfspr r9,SPRN_SPRG3 + lwz r10,saved_ksp_limit@l(0); + stw r10,KSP_LIMIT(r9) + lwz r9,crit_srr0@l(0); + lwz r10,crit_srr1@l(0); + mtspr SPRN_SRR0,r9; + mtspr SPRN_SRR1,r10; RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) +#endif /* CONFIG_40x */ #ifdef CONFIG_BOOKE + .globl ret_from_crit_exc +ret_from_crit_exc: + mfspr r9,SPRN_SPRG3 + lwz r10,SAVED_KSP_LIMIT(r1) + stw r10,KSP_LIMIT(r9) + RESTORE_xSRR(SRR0,SRR1); + RESTORE_MMU_REGS; + RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) + .globl ret_from_debug_exc ret_from_debug_exc: + mfspr r9,SPRN_SPRG3 + lwz r10,SAVED_KSP_LIMIT(r1) + stw r10,KSP_LIMIT(r9) + lwz r9,THREAD_INFO-THREAD(r9) + rlwinm r10,r1,0,0,(31-THREAD_SHIFT) + lwz r10,TI_PREEMPT(r10) + stw r10,TI_PREEMPT(r9) + RESTORE_xSRR(SRR0,SRR1); + RESTORE_xSRR(CSRR0,CSRR1); + RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI) .globl ret_from_mcheck_exc ret_from_mcheck_exc: + 
mfspr r9,SPRN_SPRG3 + lwz r10,SAVED_KSP_LIMIT(r1) + stw r10,KSP_LIMIT(r9) + RESTORE_xSRR(SRR0,SRR1); + RESTORE_xSRR(CSRR0,CSRR1); + RESTORE_xSRR(DSRR0,DSRR1); + RESTORE_MMU_REGS; RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI) #endif /* CONFIG_BOOKE */ diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index ca75eaf..7ae1787 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -93,6 +93,12 @@ _ENTRY(crit_r10) .space 4 _ENTRY(crit_r11) .space 4 +_ENTRY(crit_srr0) + .space 4 +_ENTRY(crit_srr1) + .space 4 +_ENTRY(saved_ksp_limit) + .space 4 /* * Exception vector entry code. This code runs with address translation diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index b0874d2..f277fad 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -72,7 +72,7 @@ #define DEBUG_STACK_BASE dbgirq_ctx #define DEBUG_SPRG SPRN_SPRG6W -#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE) +#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE) #ifdef CONFIG_SMP #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \ @@ -376,4 +376,25 @@ label: addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception) +#ifndef __ASSEMBLY__ +struct exception_regs { + unsigned long mas0; + unsigned long mas1; + unsigned long mas2; + unsigned long mas3; + unsigned long mas6; + unsigned long mas7; + unsigned long srr0; + unsigned long srr1; + unsigned long csrr0; + unsigned long csrr1; + unsigned long dsrr0; + unsigned long dsrr1; + unsigned long saved_ksp_limit; +}; + +/* ensure this structure is always sized to a multiple of the stack alignment */ +#define STACK_EXC_LVL_FRAME_SIZE _ALIGN_UP(sizeof (struct exception_regs), 16) + +#endif /* __ASSEMBLY__ */ #endif /* __HEAD_BOOKE_H__ */ -- 1.5.4.5 ^ permalink raw reply related [flat|nested] 8+ messages in thread
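[Editorial note] The _ALIGN_UP in STACK_EXC_LVL_FRAME_SIZE is what keeps the carved-out save area a multiple of the 16-byte stack alignment now that struct exception_regs has an odd number of words (13 once saved_ksp_limit is added). A quick check of that arithmetic, assuming 32-bit longs and re-implementing the align-up helper locally:

	#include <stdio.h>
	#include <stdint.h>

	/* local stand-in for the kernel's _ALIGN_UP, for demonstration only */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

	int main(void)
	{
		size_t nregs = 13;	/* mas0..mas7, srr/csrr/dsrr pairs, ksp_limit */
		size_t raw   = nregs * 4;	/* sizeof(struct exception_regs) on ppc32 */

		printf("raw size = %zu\n", raw);			/* 52 */
		printf("aligned  = %zu\n", (size_t)ALIGN_UP(raw, 16));	/* 64 */
		return 0;
	}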
* Re: [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers
From: Stephen Rothwell @ 2008-05-17  2:55 UTC
To: Kumar Gala; +Cc: linuxppc-dev, Paul Mackerras

Hi Kumar,

On Fri, 16 May 2008 14:08:00 -0500 (CDT) Kumar Gala <galak@kernel.crashing.org> wrote:
>
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -52,6 +52,15 @@
>  #include <asm/iseries/alpaca.h>
>  #endif
>
> +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
> +#include "head_booke.h"
> +#endif
> +
> +#define DEFINE(sym, val) \
> +	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
> +
> +#define BLANK() asm volatile("\n->" : : )
> +

Why are these defines here?  We already pick them up from kbuild.h ...

--
Cheers,
Stephen Rothwell                    sfr@canb.auug.org.au
http://www.canb.auug.org.au/~sfr/
* Re: [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers
From: Kumar Gala @ 2008-05-17 15:00 UTC
To: Stephen Rothwell; +Cc: linuxppc-dev, Paul Mackerras

On May 16, 2008, at 9:55 PM, Stephen Rothwell wrote:

> Hi Kumar,
>
> On Fri, 16 May 2008 14:08:00 -0500 (CDT) Kumar Gala <galak@kernel.crashing.org> wrote:
>>
>> +++ b/arch/powerpc/kernel/asm-offsets.c
>> @@ -52,6 +52,15 @@
>>  #include <asm/iseries/alpaca.h>
>>  #endif
>>
>> +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
>> +#include "head_booke.h"
>> +#endif
>> +
>> +#define DEFINE(sym, val) \
>> +	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
>> +
>> +#define BLANK() asm volatile("\n->" : : )
>> +
>
> Why are these defines here?  We already pick them up from kbuild.h ...

thanks, this is a mismerge from some old tree, will clean up.

- k
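[Editorial note] For background on Stephen's remark: whether it comes from kbuild.h or is open-coded, DEFINE() works by making the compiler emit "->NAME value" marker lines into the generated assembly, which the build then post-processes into a header of #define constants consumed by the .S files. A stripped-down illustration follows; the post-processing step is only described in the comment and is a simplification of what the kernel build actually runs:

	/* offsets-demo.c -- compile with "cc -S offsets-demo.c" and look at
	 * offsets-demo.s: each DEFINE() becomes a "->NAME <value>" marker line,
	 * which the kernel build turns into "#define NAME value". */
	#include <stddef.h>

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	struct exception_regs { unsigned long srr0, srr1; };

	int main(void)
	{
		DEFINE(_SRR0, offsetof(struct exception_regs, srr0));
		DEFINE(_SRR1, offsetof(struct exception_regs, srr1));
		return 0;
	}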
* Re: [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers
From: Josh Boyer @ 2008-05-20  2:04 UTC
To: Kumar Gala; +Cc: linuxppc-dev, Paul Mackerras

On Fri, 16 May 2008 14:08:00 -0500 (CDT) Kumar Gala <galak@kernel.crashing.org> wrote:

> On machines with more than one exception level any system register that
> might be modified by the "normal" exception level needs to be saved and
> restored on taking a higher level exception.  We already are saving
> and restoring ESR and DEAR.
>
> For critical level add SRR0/1.
> For debug level add CSRR0/1 and SRR0/1.
> For machine check level add DSRR0/1, CSRR0/1, and SRR0/1.
>
> On FSL Book-E parts we always save/restore the MAS registers for critical,
> debug, and machine check level exceptions.  On 44x we always save/restore
> the MMUCR.
>
> Additionally, we save and restore the ksp_limit since we have to adjust it
> for each exception level.
>
> Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
> ---

<snip>

> --- a/arch/powerpc/kernel/entry_32.S
> +++ b/arch/powerpc/kernel/entry_32.S

<snip>

> +#ifdef CONFIG_40x
>  .globl ret_from_crit_exc
>  ret_from_crit_exc:
> +	mfspr	r9,SPRN_SPRG3

	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);

> +	stw	r10,KSP_LIMIT(r9)

	lis	r9, crit_srr0@ha;
	lwz	r9, crit_srr0@l(r9);
	lis	r10, crit_srr1@ha;
	lwz	r10, crit_srr1@l(r10);

> +	mtspr	SPRN_SRR0,r9;
> +	mtspr	SPRN_SRR1,r10;
>  	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
> +#endif /* CONFIG_40x */

With the above changes, I no longer get kernel panics on returning from
critical exceptions.  Breakpoints and single stepping worked on my
405GP board.

Again, not stress tested but it's looking much better.

josh
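[Editorial note] The reason Josh's variant loads through a lis/lwz pair: an access like lwz r10,saved_ksp_limit@l(0) uses only the sign-extended low 16 bits of the symbol's address (register 0 as a load base reads as a literal zero), so it can only reach symbols in the low 32 KB of the address space in use, whereas lis rX,sym@ha followed by lwz rY,sym@l(rX) reconstructs the full address. A small C model of that split, with an arbitrary address value:

	/* Models the @ha / @l split used by lis+lwz; the address is arbitrary. */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t ha(uint32_t addr)	/* "high adjusted" 16 bits */
	{
		return (addr + 0x8000) >> 16;
	}

	static int32_t lo(uint32_t addr)	/* low 16 bits, sign-extended */
	{
		return (int16_t)(addr & 0xffff);
	}

	int main(void)
	{
		uint32_t sym = 0xc0009abc;	/* pretend virtual address */

		/* lis rX,sym@ha ; lwz rY,sym@l(rX) computes (ha << 16) + lo */
		uint32_t rebuilt = (ha(sym) << 16) + lo(sym);
		printf("rebuilt %#x (matches: %s)\n", rebuilt,
		       rebuilt == sym ? "yes" : "no");

		/* lwz rY,sym@l(0) would instead use just the sign-extended low half */
		printf("low-half-only address: %#x\n", (uint32_t)lo(sym));
		return 0;
	}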
* Re: [PATCH v3 3/3] [POWERPC] 40x/Book-E: Save/restore volatile exception registers
From: Kumar Gala @ 2008-05-20  5:03 UTC
To: Josh Boyer; +Cc: linuxppc-dev@ozlabs.org list, Paul Mackerras

>> <snip>
>
>> --- a/arch/powerpc/kernel/entry_32.S
>> +++ b/arch/powerpc/kernel/entry_32.S
>
> <snip>
>
>> +#ifdef CONFIG_40x
>>  .globl ret_from_crit_exc
>> ret_from_crit_exc:
>> +	mfspr	r9,SPRN_SPRG3
> 	lis	r10,saved_ksp_limit@ha;
> 	lwz	r10,saved_ksp_limit@l(r10);
> 	tovirt(r9,r9);
>> +	stw	r10,KSP_LIMIT(r9)
> 	lis	r9, crit_srr0@ha;
> 	lwz	r9, crit_srr0@l(r9);
> 	lis	r10, crit_srr1@ha;
> 	lwz	r10, crit_srr1@l(r10);
>> +	mtspr	SPRN_SRR0,r9;
>> +	mtspr	SPRN_SRR1,r10;
>>  	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
>> +#endif /* CONFIG_40x */
>
> With the above changes, I no longer get kernel panics on returning
> from
> critical exceptions.  Breakpoints and single stepping worked on my
> 405GP board.
>
> Again, not stress tested but it's looking much better.
>
> josh

Thanks.  I've just posted v5 with the various fixes for 40x.  Can you
test those out to make sure they still work.

- k