* [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers
@ 2009-08-19 5:08 Kumar Gala
2009-08-19 5:08 ` [PATCH 2/5] powerpc/book3e-64: Move the default cpu table entry Kumar Gala
2009-08-19 7:25 ` [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers Benjamin Herrenschmidt
0 siblings, 2 replies; 10+ messages in thread
From: Kumar Gala @ 2009-08-19 5:08 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
Support for TLB reservation (or TLB Write Conditional) and Paired MAS
registers are optional for a processor implementation so we handle
them via MMU feature sections.
We currently only use paired MAS registers to access the full RPN + perm
bits that are kept in MAS7||MAS3. We assume that if an implementation has
hardware page table at this time it also implements TLB reservations.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
---
arch/powerpc/include/asm/mmu.h | 9 +++++++++
arch/powerpc/mm/tlb_low_64e.S | 36 +++++++++++++++++++++++++++++++++++-
2 files changed, 44 insertions(+), 1 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 2fcfefc..7ffbb65 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -58,6 +58,15 @@
*/
#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000)
+/* Enable use of TLB reservation. Processor should support tlbsrx.
+ * instruction and MAS0[WQ].
+ */
+#define MMU_FTR_USE_TLBRSRV ASM_CONST(0x00800000)
+
+/* Use paired MAS registers (MAS7||MAS3, etc.)
+ */
+#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
+
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 10d524d..5b8e274 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -189,12 +189,16 @@ normal_tlb_miss:
clrrdi r14,r14,3
or r10,r15,r14
+BEGIN_MMU_FTR_SECTION
/* Set the TLB reservation and seach for existing entry. Then load
* the entry.
*/
PPC_TLBSRX_DOT(0,r16)
ld r14,0(r10)
beq normal_tlb_miss_done
+MMU_FTR_SECTION_ELSE
+ ld r14,0(r10)
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
finish_normal_tlb_miss:
/* Check if required permissions are met */
@@ -241,7 +245,14 @@ finish_normal_tlb_miss:
bne 1f
li r11,MAS3_SW|MAS3_UW
andc r15,r15,r11
-1: mtspr SPRN_MAS7_MAS3,r15
+1:
+BEGIN_MMU_FTR_SECTION
+ srdi r16,r15,32
+ mtspr SPRN_MAS3,r15
+ mtspr SPRN_MAS7,r16
+MMU_FTR_SECTION_ELSE
+ mtspr SPRN_MAS7_MAS3,r15
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -311,11 +322,13 @@ virt_page_table_tlb_miss:
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
1:
+BEGIN_MMU_FTR_SECTION
/* Search if we already have a TLB entry for that virtual address, and
* if we do, bail out.
*/
PPC_TLBSRX_DOT(0,r16)
beq virt_page_table_tlb_miss_done
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
/* Now, we need to walk the page tables. First check if we are in
* range.
@@ -367,7 +380,14 @@ virt_page_table_tlb_miss:
*/
clrldi r11,r15,4 /* remove region ID from RPN */
ori r10,r11,1 /* Or-in SR */
+
+BEGIN_MMU_FTR_SECTION
+ srdi r16,r10,32
+ mtspr SPRN_MAS3,r10
+ mtspr SPRN_MAS7,r16
+MMU_FTR_SECTION_ELSE
mtspr SPRN_MAS7_MAS3,r10
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -618,7 +638,14 @@ htw_tlb_miss:
#else
ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
#endif
+
+BEGIN_MMU_FTR_SECTION
+ srdi r16,r10,32
+ mtspr SPRN_MAS3,r10
+ mtspr SPRN_MAS7,r16
+MMU_FTR_SECTION_ELSE
mtspr SPRN_MAS7_MAS3,r10
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -700,7 +727,14 @@ tlb_load_linear:
clrrdi r10,r16,30 /* 1G page index */
clrldi r10,r10,4 /* clear region bits */
ori r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
+
+BEGIN_MMU_FTR_SECTION
+ srdi r16,r10,32
+ mtspr SPRN_MAS3,r10
+ mtspr SPRN_MAS7,r16
+MMU_FTR_SECTION_ELSE
mtspr SPRN_MAS7_MAS3,r10
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
--
1.6.0.6
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 2/5] powerpc/book3e-64: Move the default cpu table entry
2009-08-19 5:08 [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers Kumar Gala
@ 2009-08-19 5:08 ` Kumar Gala
2009-08-19 5:08 ` [PATCH 3/5] powerpc/book3e-64: Wait til generic_calibrate_decr to enable decrementer Kumar Gala
2009-08-19 7:25 ` [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers Benjamin Herrenschmidt
1 sibling, 1 reply; 10+ messages in thread
From: Kumar Gala @ 2009-08-19 5:08 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
Move the default cpu entry table for CONFIG_PPC_BOOK3E_64 to the
very end since we will probably want to support both 32-bit and
64-bit kernels for some processors that are higher up in the list.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
---
arch/powerpc/kernel/cputable.c | 49 ++++++++++++++++++++++------------------
1 files changed, 27 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9f38ecb..0b9c913 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -89,8 +89,12 @@ extern void __restore_cpu_power7(void);
#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_HAS_ALTIVEC_COMP)
+#ifdef CONFIG_PPC_BOOK3E_64
+#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
+#else
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE)
+#endif
static struct cpu_spec __initdata cpu_specs[] = {
#ifdef CONFIG_PPC_BOOK3S_64
@@ -509,28 +513,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.platform = "power4",
}
#endif /* CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_PPC_BOOK3E_64
- { /* This is a default entry to get going, to be replaced by
- * a real one at some stage
- */
-#define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
- CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "Book3E",
- .cpu_features = CPU_FTRS_BASE_BOOK3E,
- .cpu_user_features = COMMON_USER_PPC64,
- .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
- MMU_FTR_USE_TLBIVAX_BCAST |
- MMU_FTR_LOCK_BCAST_INVAL,
- .icache_bsize = 64,
- .dcache_bsize = 64,
- .num_pmcs = 0,
- .machine_check = machine_check_generic,
- .platform = "power6",
- },
-#endif
#ifdef CONFIG_PPC32
#if CLASSIC_PPC
@@ -1846,6 +1828,29 @@ static struct cpu_spec __initdata cpu_specs[] = {
}
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC_BOOK3E_64
+ { /* This is a default entry to get going, to be replaced by
+ * a real one at some stage
+ */
+#define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
+ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "Book3E",
+ .cpu_features = CPU_FTRS_BASE_BOOK3E,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
+ MMU_FTR_USE_TLBIVAX_BCAST |
+ MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 0,
+ .machine_check = machine_check_generic,
+ .platform = "power6",
+ },
+#endif
};
static struct cpu_spec the_cpu_spec;
--
1.6.0.6
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 3/5] powerpc/book3e-64: Wait til generic_calibrate_decr to enable decrementer
2009-08-19 5:08 ` [PATCH 2/5] powerpc/book3e-64: Move the default cpu table entry Kumar Gala
@ 2009-08-19 5:08 ` Kumar Gala
2009-08-19 5:08 ` [PATCH 4/5] powerpc/book3e-64: Add helper function to setup IVORs Kumar Gala
0 siblings, 1 reply; 10+ messages in thread
From: Kumar Gala @ 2009-08-19 5:08 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
Match what we do on 32-bit Book-E processors and enable the decrementer
in generic_calibrate_decr. We need to make sure we disable the
decrementer early in boot since we currently use lazy (soft) interrupt
on 64-bit Book-E and possibly get a decrementer exception before we
are ready for it.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
---
arch/powerpc/kernel/exceptions-64e.S | 6 ++++--
1 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 695d484..3611b0e 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -774,9 +774,11 @@ _STATIC(init_thread_book3e)
/* Make sure interrupts are off */
wrteei 0
- /* disable watchdog and FIT and enable DEC interrupts */
- lis r3,TCR_DIE@h
+ /* disable all timers and clear out status */
+ li r3,0
mtspr SPRN_TCR,r3
+ mfspr r3,SPRN_TSR
+ mtspr SPRN_TSR,r3
blr
--
1.6.0.6
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 4/5] powerpc/book3e-64: Add helper function to setup IVORs
2009-08-19 5:08 ` [PATCH 3/5] powerpc/book3e-64: Wait til generic_calibrate_decr to enable decrementer Kumar Gala
@ 2009-08-19 5:08 ` Kumar Gala
2009-08-19 5:08 ` [PATCH 5/5] powerpc/book3e-64: Add support to initial_tlb_book3e for non-HES TLB Kumar Gala
0 siblings, 1 reply; 10+ messages in thread
From: Kumar Gala @ 2009-08-19 5:08 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
Not all 64-bit Book-3E parts will have fixed IVORs so add a function that
cpusetup code can call to setup the base IVORs (0..15) to match the fixed
offsets. We need to 'or' part of interrupt_base_book3e into the IVORs
since on parts that have them the IVPR doesn't extend as far down.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
---
arch/powerpc/include/asm/exception-64e.h | 4 ++++
arch/powerpc/kernel/exceptions-64e.S | 19 +++++++++++++++++++
2 files changed, 23 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 94cb3d7..6d53f31 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -196,6 +196,10 @@ exc_##label##_book3e:
#define TLB_MISS_STATS_SAVE_INFO
#endif
+#define SET_IVOR(vector_number, vector_offset) \
+ li r3,vector_offset@l; \
+ ori r3,r3,interrupt_base_book3e@l; \
+ mtspr SPRN_IVOR##vector_number,r3;
#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 3611b0e..662236c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -782,5 +782,24 @@ _STATIC(init_thread_book3e)
blr
+_GLOBAL(__setup_base_ivors)
+ SET_IVOR(0, 0x020) /* Critical Input */
+ SET_IVOR(1, 0x000) /* Machine Check */
+ SET_IVOR(2, 0x060) /* Data Storage */
+ SET_IVOR(3, 0x080) /* Instruction Storage */
+ SET_IVOR(4, 0x0a0) /* External Input */
+ SET_IVOR(5, 0x0c0) /* Alignment */
+ SET_IVOR(6, 0x0e0) /* Program */
+ SET_IVOR(7, 0x100) /* FP Unavailable */
+ SET_IVOR(8, 0x120) /* System Call */
+ SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
+ SET_IVOR(10, 0x160) /* Decrementer */
+ SET_IVOR(11, 0x180) /* Fixed Interval Timer */
+ SET_IVOR(12, 0x1a0) /* Watchdog Timer */
+ SET_IVOR(13, 0x1c0) /* Data TLB Error */
+ SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
+ SET_IVOR(15, 0x040) /* Debug */
+ sync
+ blr
--
1.6.0.6
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 5/5] powerpc/book3e-64: Add support to initial_tlb_book3e for non-HES TLB
2009-08-19 5:08 ` [PATCH 4/5] powerpc/book3e-64: Add helper function to setup IVORs Kumar Gala
@ 2009-08-19 5:08 ` Kumar Gala
0 siblings, 0 replies; 10+ messages in thread
From: Kumar Gala @ 2009-08-19 5:08 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
We now search through TLBnCFG looking for the first array that has IPROT
support (we assume that there is only one). If that TLB has hardware
entry select (HES) support we use the existing code and with the proper
TLB select (the HES code still needs to clean up bolted entries from
firmware). The non-HES code is pretty similar to the 32-bit FSL Book-E
code but does make some new assumptions (like that we have tlbilx) and
simplifies things down a bit.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
---
Ben,
One concern I had with this patch was the fact that I'm using
r5..r8 w/o saving them off in head_64.S.
- k
arch/powerpc/include/asm/reg_booke.h | 2 +
arch/powerpc/kernel/exceptions-64e.S | 204 +++++++++++++++++++++++++++++++++-
2 files changed, 202 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2c9c706..e204de6 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -108,6 +108,8 @@
#define SPRN_PID2 0x27A /* Process ID Register 2 */
#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */
#define SPRN_TLB1CFG 0x2B1 /* TLB 1 Config Register */
+#define SPRN_TLB2CFG 0x2B2 /* TLB 2 Config Register */
+#define SPRN_TLB3CFG 0x2B3 /* TLB 3 Config Register */
#define SPRN_EPR 0x2BE /* External Proxy Register */
#define SPRN_CCR1 0x378 /* Core Configuration Register 1 */
#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 662236c..9048f96 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -616,18 +616,214 @@ bad_stack_book3e:
* Setup the initial TLB for a core. This current implementation
* assume that whatever we are running off will not conflict with
* the new mapping at PAGE_OFFSET.
- * We also make various assumptions about the processor we run on,
- * this might have to be made more flexible based on the content
- * of MMUCFG and friends.
*/
_GLOBAL(initial_tlb_book3e)
+ /* Look for the first TLB with IPROT set */
+ mfspr r4,SPRN_TLB0CFG
+ andi. r3,r4,TLBnCFG_IPROT
+ lis r3,MAS0_TLBSEL(0)@h
+ bne found_iprot
+
+ mfspr r4,SPRN_TLB1CFG
+ andi. r3,r4,TLBnCFG_IPROT
+ lis r3,MAS0_TLBSEL(1)@h
+ bne found_iprot
+
+ mfspr r4,SPRN_TLB2CFG
+ andi. r3,r4,TLBnCFG_IPROT
+ lis r3,MAS0_TLBSEL(2)@h
+ bne found_iprot
+
+ lis r3,MAS0_TLBSEL(3)@h
+ mfspr r4,SPRN_TLB3CFG
+ /* fall through */
+
+found_iprot:
+ andi. r5,r4,TLBnCFG_HES
+ bne have_hes
+
+ mflr r8 /* save LR */
+/* 1. Find the index of the entry we're executing in
+ *
+ * r3 = MAS0_TLBSEL (for the iprot array)
+ * r4 = SPRN_TLBnCFG
+ */
+ bl invstr /* Find our address */
+invstr: mflr r6 /* Make it accessible */
+ mfmsr r7
+ rlwinm r5,r7,27,31,31 /* extract MSR[IS] */
+ mfspr r7,SPRN_PID
+ slwi r7,r7,16
+ or r7,r7,r5
+ mtspr SPRN_MAS6,r7
+ tlbsx 0,r6 /* search MSR[IS], SPID=PID */
+
+ mfspr r3,SPRN_MAS0
+ rlwinm r5,r3,16,20,31 /* Extract MAS0(Entry) */
+
+ mfspr r7,SPRN_MAS1 /* Insure IPROT set */
+ oris r7,r7,MAS1_IPROT@h
+ mtspr SPRN_MAS1,r7
+ tlbwe
+
+/* 2. Invalidate all entries except the entry we're executing in
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
+ * r4 = SPRN_TLBnCFG
+ * r5 = ESEL of entry we are running in
+ */
+ andi. r4,r4,TLBnCFG_N_ENTRY /* Extract # entries */
+ li r6,0 /* Set Entry counter to 0 */
+1: mr r7,r3 /* Set MAS0(TLBSEL) */
+ rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
+ mtspr SPRN_MAS0,r7
+ tlbre
+ mfspr r7,SPRN_MAS1
+ rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
+ cmpw r5,r6
+ beq skpinv /* Dont update the current execution TLB */
+ mtspr SPRN_MAS1,r7
+ tlbwe
+ isync
+skpinv: addi r6,r6,1 /* Increment */
+ cmpw r6,r4 /* Are we done? */
+ bne 1b /* If not, repeat */
+
+ /* Invalidate all TLBs */
+ PPC_TLBILX_ALL(0,0)
+ sync
+ isync
+
+/* 3. Setup a temp mapping and jump to it
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
+ * r5 = ESEL of entry we are running in
+ */
+ andi. r7,r5,0x1 /* Find an entry not used and is non-zero */
+ addi r7,r7,0x1
+ mr r4,r3 /* Set MAS0(TLBSEL) = 1 */
+ mtspr SPRN_MAS0,r4
+ tlbre
+
+ rlwimi r4,r7,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r7) */
+ mtspr SPRN_MAS0,r4
+
+ mfspr r7,SPRN_MAS1
+ xori r6,r7,MAS1_TS /* Setup TMP mapping in the other Address space */
+ mtspr SPRN_MAS1,r6
+
+ tlbwe
+
+ mfmsr r6
+ xori r6,r6,MSR_IS
+ mtspr SPRN_SRR1,r6
+ bl 1f /* Find our address */
+1: mflr r6
+ addi r6,r6,(2f - 1b)
+ mtspr SPRN_SRR0,r6
+ rfi
+2:
+
+/* 4. Clear out PIDs & Search info
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ * r5 = MAS3
+ */
+ li r6,0
+ mtspr SPRN_MAS6,r6
+ mtspr SPRN_PID,r6
+
+/* 5. Invalidate mapping we started in
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ * r5 = MAS3
+ */
+ mtspr SPRN_MAS0,r3
+ tlbre
+ mfspr r6,SPRN_MAS1
+ rlwinm r6,r6,0,2,0 /* clear IPROT */
+ mtspr SPRN_MAS1,r6
+ tlbwe
+
+ /* Invalidate TLB1 */
+ PPC_TLBILX_ALL(0,0)
+ sync
+ isync
+
+/* The mapping only needs to be cache-coherent on SMP */
+#ifdef CONFIG_SMP
+#define M_IF_SMP MAS2_M
+#else
+#define M_IF_SMP 0
+#endif
+
+/* 6. Setup KERNELBASE mapping in TLB[0]
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ * r5 = MAS3
+ */
+ rlwinm r3,r3,0,16,3 /* clear ESEL */
+ mtspr SPRN_MAS0,r3
+ lis r6,(MAS1_VALID|MAS1_IPROT)@h
+ ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+ mtspr SPRN_MAS1,r6
+
+ LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
+ mtspr SPRN_MAS2,r6
+
+ rlwinm r5,r5,0,0,25
+ ori r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
+ mtspr SPRN_MAS3,r5
+ li r5,-1
+ rlwinm r5,r5,0,0,25
+
+ tlbwe
+
+/* 7. Jump to KERNELBASE mapping
+ *
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ */
+ /* Now we branch the new virtual address mapped by this entry */
+ LOAD_REG_IMMEDIATE(r6,2f)
+ lis r7,MSR_KERNEL@h
+ ori r7,r7,MSR_KERNEL@l
+ mtspr SPRN_SRR0,r6
+ mtspr SPRN_SRR1,r7
+ rfi /* start execution out of TLB1[0] entry */
+2:
+
+/* 8. Clear out the temp mapping
+ *
+ * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
+ */
+ mtspr SPRN_MAS0,r4
+ tlbre
+ mfspr r5,SPRN_MAS1
+ rlwinm r5,r5,0,2,0 /* clear IPROT */
+ mtspr SPRN_MAS1,r5
+ tlbwe
+
+ /* Invalidate TLB1 */
+ PPC_TLBILX_ALL(0,0)
+ sync
+ isync
+
+ /* We translate LR and return */
+ tovirt(r8,r8)
+ mtlr r8
+ blr
+
+have_hes:
/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
* kernel linear mapping. We also set MAS8 once for all here though
* that will have to be made dependent on whether we are running under
* a hypervisor I suppose.
*/
- li r3,MAS0_HES | MAS0_WQ_ALLWAYS
+ ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS
mtspr SPRN_MAS0,r3
lis r3,(MAS1_VALID | MAS1_IPROT)@h
ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
--
1.6.0.6
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers
2009-08-19 5:08 [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers Kumar Gala
2009-08-19 5:08 ` [PATCH 2/5] powerpc/book3e-64: Move the default cpu table entry Kumar Gala
@ 2009-08-19 7:25 ` Benjamin Herrenschmidt
2009-08-19 21:37 ` Kumar Gala
1 sibling, 1 reply; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-08-19 7:25 UTC (permalink / raw)
To: Kumar Gala; +Cc: linuxppc-dev
On Wed, 2009-08-19 at 00:08 -0500, Kumar Gala wrote:
> Support for TLB reservation (or TLB Write Conditional) and Paired MAS
> registers are optional for a processor implementation so we handle
> them via MMU feature sections.
>
> We currently only used paired MAS registers to access the full RPN + perm
> bits that are kept in MAS7||MAS3. We assume that if an implementation has
> hardware page table at this time it also implements in TLB reservations.
You also need to be careful with this code:
virt_page_table_tlb_miss_done:
/* We have overriden MAS2:EPN but currently our primary TLB miss
* handler will always restore it so that should not be an issue,
* if we ever optimize the primary handler to not write MAS2 on
* some cases, we'll have to restore MAS2:EPN here based on the
* original fault's DEAR. If we do that we have to modify the
* ITLB miss handler to also store SRR0 in the exception frame
* as DEAR.
*
* However, one nasty thing we did is we cleared the reservation
* (well, potentially we did). We do a trick here thus if we
* are not a level 0 exception (we interrupted the TLB miss) we
* offset the return address by -4 in order to replay the tlbsrx
* instruction there
*/
subf r10,r13,r12
cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE
bne- 1f
ld r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
addi r10,r11,-4
std r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
You may want to make the 3 last lines conditional on having tlbsrx.
Right now, in the no-tlbsrx. case, what happens is that it will go back
to the previous instruction, an or, which hopefully should be harmless
-but- this code is nasty enough you really don't want to take that
sort of chances.
Feel free to add a fat comment next to the ld in the tlbsrx case itself
explaining why those two instructions must be kept together and any
change here must be reflected in the second level handler.
Cheers,
Ben.
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers
2009-08-19 7:25 ` [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers Benjamin Herrenschmidt
@ 2009-08-19 21:37 ` Kumar Gala
2009-08-20 0:43 ` Benjamin Herrenschmidt
0 siblings, 1 reply; 10+ messages in thread
From: Kumar Gala @ 2009-08-19 21:37 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
On Aug 19, 2009, at 2:25 AM, Benjamin Herrenschmidt wrote:
> On Wed, 2009-08-19 at 00:08 -0500, Kumar Gala wrote:
>> Support for TLB reservation (or TLB Write Conditional) and Paired MAS
>> registers are optional for a processor implementation so we handle
>> them via MMU feature sections.
>>
>> We currently only used paired MAS registers to access the full RPN
>> + perm
>> bits that are kept in MAS7||MAS3. We assume that if an
>> implementation has
>> hardware page table at this time it also implements in TLB
>> reservations.
>
> You also need to be careful with this code:
>
> virt_page_table_tlb_miss_done:
>
> /* We have overriden MAS2:EPN but currently our primary TLB miss
> * handler will always restore it so that should not be an issue,
> * if we ever optimize the primary handler to not write MAS2 on
> * some cases, we'll have to restore MAS2:EPN here based on the
> * original fault's DEAR. If we do that we have to modify the
> * ITLB miss handler to also store SRR0 in the exception frame
> * as DEAR.
> *
> * However, one nasty thing we did is we cleared the reservation
> * (well, potentially we did). We do a trick here thus if we
> * are not a level 0 exception (we interrupted the TLB miss) we
> * offset the return address by -4 in order to replay the tlbsrx
> * instruction there
> */
> subf r10,r13,r12
> cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE
> bne- 1f
> ld r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
> addi r10,r11,-4
> std r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
>
> You may want to make the 3 last lines conditional on having tlbsrx.
The whole thing only ever gets called if we had tlbsrx. so is there
any utility in making a part of conditional on tlbsrx?
> Right now, in the no-tlbsrx. case, what happens is that it will go
> back
> to the previous instruction, an or, which hopefully should be harmless
> -but- this code is nasty enough you really don't want to take that
> sort of chances.
>
> Feel free to add a fat comment next to the ld in the tlbsrx case
> itself
> explaining why those two instructions must be kept together and any
> change here must be reflected in the second level handler.
>
> Cheers,
> Ben.
>
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers
2009-08-19 21:37 ` Kumar Gala
@ 2009-08-20 0:43 ` Benjamin Herrenschmidt
2009-08-24 16:12 ` Kumar Gala
0 siblings, 1 reply; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-08-20 0:43 UTC (permalink / raw)
To: Kumar Gala; +Cc: linuxppc-dev
On Wed, 2009-08-19 at 16:37 -0500, Kumar Gala wrote:
> On Aug 19, 2009, at 2:25 AM, Benjamin Herrenschmidt wrote:
> The whole thing only ever gets called if we had tlbsrx. so is there
> any utility in making a part of conditional on tlbsrx?
I don't think so ... this is the second level TLB miss handler when
the first level takes a hit on the virtually linear page tables. It
has nothing to do with tlbsrx... however, it does offset the return
address back into the first level handler by -4 to account for
replaying the tlbsrx instruction which you probably don't want to do.
Ben.
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers
2009-08-20 0:43 ` Benjamin Herrenschmidt
@ 2009-08-24 16:12 ` Kumar Gala
2009-08-25 1:08 ` Benjamin Herrenschmidt
0 siblings, 1 reply; 10+ messages in thread
From: Kumar Gala @ 2009-08-24 16:12 UTC (permalink / raw)
To: Benjamin Herrenschmidt; +Cc: linuxppc-dev
On Aug 19, 2009, at 7:43 PM, Benjamin Herrenschmidt wrote:
> On Wed, 2009-08-19 at 16:37 -0500, Kumar Gala wrote:
>> On Aug 19, 2009, at 2:25 AM, Benjamin Herrenschmidt wrote:
>
>> The whole thing only ever gets called if we had tlbsrx. so is there
>> any utility in making a part of conditional on tlbsrx?
>
> I don't think so ... this is the second level TLB miss handler when
> the first level takes a hit on the virtually linear page tables, I
> has nothing to do with tlbsrx... however, it does offset the return
> address back into the first level handler by -4 to account for
> replaying the tlbsrx instruction which you probably don't want to do.
Duh. Wasn't looking at the fall through.
But is there any reason to even have any of the 6 instructions in the
'virt_page_table_tlb_miss_done' path if we don't have TLBSRX?
- k
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers
2009-08-24 16:12 ` Kumar Gala
@ 2009-08-25 1:08 ` Benjamin Herrenschmidt
0 siblings, 0 replies; 10+ messages in thread
From: Benjamin Herrenschmidt @ 2009-08-25 1:08 UTC (permalink / raw)
To: Kumar Gala; +Cc: linuxppc-dev
On Mon, 2009-08-24 at 11:12 -0500, Kumar Gala wrote:
> Duh. Wasn't looking at the fall through.
>
> But is there any reason to even have any of the 6 instructions in
> the
> 'virt_page_table_tlb_miss_done' path if we don't have TLBSRX?
>
No, that's what I said in my initial email :-) You can probably
"alternate out" that whole thing.
Cheers,
Ben.
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2009-08-25 1:08 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-08-19 5:08 [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers Kumar Gala
2009-08-19 5:08 ` [PATCH 2/5] powerpc/book3e-64: Move the default cpu table entry Kumar Gala
2009-08-19 5:08 ` [PATCH 3/5] powerpc/book3e-64: Wait til generic_calibrate_decr to enable decrementer Kumar Gala
2009-08-19 5:08 ` [PATCH 4/5] powerpc/book3e-64: Add helper function to setup IVORs Kumar Gala
2009-08-19 5:08 ` [PATCH 5/5] powerpc/book3e-64: Add support to initial_tlb_book3e for non-HES TLB Kumar Gala
2009-08-19 7:25 ` [PATCH 1/5] powerpc/mm: Add MMU features for TLB reservation & Paired MAS registers Benjamin Herrenschmidt
2009-08-19 21:37 ` Kumar Gala
2009-08-20 0:43 ` Benjamin Herrenschmidt
2009-08-24 16:12 ` Kumar Gala
2009-08-25 1:08 ` Benjamin Herrenschmidt
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).