linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: Anshuman Khandual <khandual@linux.vnet.ibm.com>
To: linuxppc-dev@ozlabs.org
Cc: mikey@neuling.org, mpe@ellerman.id.au
Subject: [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization
Date: Tue, 21 Jul 2015 12:28:42 +0530	[thread overview]
Message-ID: <1437461926-8908-4-git-send-email-khandual@linux.vnet.ibm.com> (raw)
In-Reply-To: <1437461926-8908-1-git-send-email-khandual@linux.vnet.ibm.com>

From: "khandual@linux.vnet.ibm.com" <khandual@linux.vnet.ibm.com>

This patch adds the following helper functions to improve modularization
and readability of the code.

(1) slb_invalid_all: 		Invalidates the entire SLB
(2) slb_invalid_paca_slots: 	Invalidates SLB entries present in PACA
(3) kernel_linear_vsid_flags:	VSID flags for kernel linear mapping
(4) kernel_virtual_vsid_flags:	VSID flags for kernel virtual mapping
(5) kernel_io_vsid_flags:	VSID flags for kernel IO mapping
(6) kernel_vmemmap_vsid_flags:	VSID flags for kernel vmemmap mapping

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb.c | 87 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 59 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cbeaaa2..dcba4c2 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -94,18 +94,37 @@ static inline void new_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
+static inline unsigned long kernel_linear_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
+}
+
+static inline unsigned long kernel_virtual_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+}
+
+static inline unsigned long kernel_io_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long kernel_vmemmap_vsid_flags(void)
+{
+	return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
+}
+#endif
+
 static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * and PR KVM appropriately too. */
-	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
+	unsigned long lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 
-	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | vmalloc_llp;
-
+	lflags = kernel_linear_vsid_flags();
+	vflags = kernel_virtual_vsid_flags();
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, SLOT_KSTACK);
 	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
@@ -153,7 +172,7 @@ void slb_vmalloc_update(void)
 {
 	unsigned long vflags;
 
-	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	vflags = kernel_virtual_vsid_flags();
 	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
 	slb_flush_and_rebolt();
 }
@@ -187,6 +206,23 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
 }
 
+static void slb_invalid_paca_slots(unsigned long offset)
+{
+	unsigned long slbie_data;
+	int i;
+
+	asm volatile("isync" : : : "memory");
+	for (i = 0; i < offset; i++) {
+		slbie_data = (unsigned long)get_paca()->slb_cache[i]
+			<< SID_SHIFT; /* EA */
+		slbie_data |= user_segment_size(slbie_data)
+			<< SLBIE_SSIZE_SHIFT;
+		slbie_data |= SLBIE_C; /* C set for user addresses */
+		asm volatile("slbie %0" : : "r" (slbie_data));
+	}
+	asm volatile("isync" : : : "memory");
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -206,17 +242,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	offset = get_paca()->slb_cache_ptr;
 	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
-		int i;
-		asm volatile("isync" : : : "memory");
-		for (i = 0; i < offset; i++) {
-			slbie_data = (unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT; /* EA */
-			slbie_data |= user_segment_size(slbie_data)
-				<< SLBIE_SSIZE_SHIFT;
-			slbie_data |= SLBIE_C; /* C set for user addresses */
-			asm volatile("slbie %0" : : "r" (slbie_data));
-		}
-		asm volatile("isync" : : : "memory");
+		slb_invalid_paca_slots(offset);
 	} else {
 		__slb_flush_and_rebolt();
 	}
@@ -256,6 +282,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 	patch_instruction(insn_addr, insn);
 }
 
+/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+static inline void slb_invalid_all(void)
+{
+	asm volatile("isync":::"memory");
+	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
+	asm volatile("isync; slbia; isync":::"memory");
+}
+
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -283,16 +317,16 @@ void slb_initialize(void)
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+	get_paca()->vmalloc_sllp = kernel_virtual_vsid_flags();
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
 #endif
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
-				   SLB_VSID_KERNEL | linear_llp);
+				   kernel_linear_vsid_flags());
 		patch_slb_encoding(slb_miss_kernel_load_io,
-				   SLB_VSID_KERNEL | io_llp);
+				   kernel_io_vsid_flags());
 		patch_slb_encoding(slb_compare_rr_to_size,
 				   mmu_slb_size);
 
@@ -301,20 +335,17 @@ void slb_initialize(void)
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
-				   SLB_VSID_KERNEL | vmemmap_llp);
+				   kernel_vmemmap_vsid_flags());
 		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
 #endif
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
 
-	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | vmalloc_llp;
+	lflags = kernel_linear_vsid_flags();
+	vflags = kernel_virtual_vsid_flags();
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	asm volatile("isync":::"memory");
-	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
-	asm volatile("isync; slbia; isync":::"memory");
+	slb_invalid_all();
 	new_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, SLOT_KLINR);
 	new_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, SLOT_KVIRT);
 
-- 
2.1.0

  parent reply	other threads:[~2015-07-21  6:59 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-07-21  6:58 [RFC 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
2015-07-21  6:58 ` [RFC 2/8] powerpc/slb: Rename all the 'entry' occurrences to 'slot' Anshuman Khandual
2015-07-21  9:46   ` [RFC, " Michael Ellerman
2015-07-21 11:23     ` [RFC,2/8] " Anshuman Khandual
2015-07-21  6:58 ` [RFC 3/8] powerpc/slb: Define macros for the bolted slots Anshuman Khandual
2015-07-22  9:32   ` Michael Ellerman
2015-07-21  6:58 ` Anshuman Khandual [this message]
2015-07-22  9:19   ` [RFC 4/8] powerpc/slb: Add some helper functions to improve modularization Michael Ellerman
2015-07-21  6:58 ` [RFC 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
2015-07-22  5:51   ` Michael Ellerman
2015-07-22  5:57     ` Gabriel Paubert
2015-07-22  9:01       ` Michael Ellerman
2015-07-22 12:17     ` Segher Boessenkool
2015-07-21  6:58 ` [RFC 6/8] powerpc/prom: Simplify the logic while fetching SLB size Anshuman Khandual
2015-07-21 10:21   ` [RFC, " Michael Ellerman
2015-07-21 11:24     ` Anshuman Khandual
2015-07-21  6:58 ` [RFC 7/8] powerpc/xmon: Drop 'valid' from the condition inside 'dump_segments' Anshuman Khandual
2015-07-21 10:00   ` [RFC, " Michael Ellerman
2015-07-21 11:45     ` Anshuman Khandual
2015-07-22  4:52       ` Michael Ellerman
2015-07-21  6:58 ` [RFC 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
2015-07-21 10:08   ` [RFC, " Michael Ellerman
2015-07-21 11:48     ` Anshuman Khandual

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1437461926-8908-4-git-send-email-khandual@linux.vnet.ibm.com \
    --to=khandual@linux.vnet.ibm.com \
    --cc=linuxppc-dev@ozlabs.org \
    --cc=mikey@neuling.org \
    --cc=mpe@ellerman.id.au \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).