From: jeremy.linton@arm.com (Jeremy Linton)
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH 7/7] arm64: Mark kernel page ranges contiguous
Date: Wed,  7 Oct 2015 12:00:25 -0500
Message-ID: <1444237225-13802-8-git-send-email-jeremy.linton@arm.com>
In-Reply-To: <1444237225-13802-1-git-send-email-jeremy.linton@arm.com>

With 64K pages, the next larger block mapping size is 512M. The Linux
kernel also uses different protection flags to cover its code and data.
Because of this, the vast majority of the kernel's code and data
structures end up mapped with individual 64K pages instead of the larger
block mappings common with a 4K page kernel.

Recent ARM processors support a contiguous bit in the page tables which
allows a single TLB entry to cover a range larger than a single PTE,
provided that range is mapped to physically contiguous RAM.
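
With a 64K granule the contiguous hint groups 32 adjacent level-3
entries (2M of coverage from one TLB entry), and with a 4K granule it
groups 16 entries (64K). A sketch of that arithmetic, purely for
illustration (the EX_* names are local to this example and are not part
of the patch):

/* Illustrative only; the EX_* names exist solely for this example. */
#define EX_SZ_4K		4096UL
#define EX_SZ_64K		65536UL

#define EX_CONT_PTES_4K		16	/* 16 adjacent level-3 entries */
#define EX_CONT_PTES_64K	32	/* 32 adjacent level-3 entries */

_Static_assert(EX_CONT_PTES_4K * EX_SZ_4K == 64UL * 1024,
	       "4K granule: one contiguous hint covers 64K");
_Static_assert(EX_CONT_PTES_64K * EX_SZ_64K == 2UL * 1024 * 1024,
	       "64K granule: one contiguous hint covers 2M");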

So, for the kernel it is a good idea to set this flag. Some basic
micro-benchmarks show it can significantly reduce the number of
L1 dTLB refills.
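
For illustration only, not part of the patch below: given the CONT_PTES
and PTE_CONT definitions introduced earlier in this series, setting the
flag across one naturally aligned, physically contiguous block of PTEs
amounts to roughly the following (example_mark_cont_range() is a made-up
name for this sketch):

/*
 * Example sketch only: CONT_PTES and PTE_CONT are assumed from the
 * earlier patches in this series; this helper is not part of the
 * patch below.
 */
static void example_mark_cont_range(pte_t *pte, unsigned long pfn,
				    pgprot_t prot)
{
	int i;

	/*
	 * Every entry in the block must map physically contiguous memory
	 * with identical attributes, and the block must be naturally
	 * aligned; otherwise the hint must not be set.
	 */
	for (i = 0; i < CONT_PTES; i++, pte++, pfn++)
		set_pte(pte, pfn_pte(pfn,
				     __pgprot(pgprot_val(prot) | PTE_CONT)));
}

The real logic in the diff below also has to handle ranges that only
partially overlap a contiguous block, clearing the hint on the
neighbouring entries first (see clear_cont_pte_range()).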

Add a Kconfig option to enable/disable CONT marking, and fix a bug
found by Steve Capper.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
---
 arch/arm64/mm/mmu.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 72 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9211b85..031ea7a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -69,6 +69,10 @@ static void __init *early_alloc(unsigned long sz)
 	return ptr;
 }
 
+#ifndef CONFIG_ARM64_CONT_PTE
+#define CONFIG_ARM64_CONT_PTE 0
+#endif
+
 /*
  * remap a PMD into pages
  */
@@ -80,19 +84,58 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 	do {
 		/*
 		 * Need to have the least restrictive permissions available
-		 * permissions will be fixed up later
+		 * permissions will be fixed up later. Default the new page
+		 * range to contiguous PTEs.
 		 */
-		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+		if (CONFIG_ARM64_CONT_PTE)
+			set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
+		else
+			set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
 		pfn++;
 	} while (pte++, i++, i < PTRS_PER_PTE);
 }
 
+/*
+ * Given a PTE with the CONT bit set, determine where the CONT range
+ * starts, and clear the entire range of PTE CONT bits.
+ */
+static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
+{
+	int i;
+
+	pte -= CONT_RANGE_OFFSET(addr);
+	for (i = 0; i < CONT_PTES; i++) {
+		set_pte(pte, pte_mknoncont(*pte));
+		pte++;
+	}
+	flush_tlb_all();
+}
+
+/*
+ * Given a range of PTEs set the pfn and provided page protection flags
+ */
+static void __populate_init_pte(pte_t *pte, unsigned long addr,
+				unsigned long end, phys_addr_t phys,
+				pgprot_t prot)
+{
+	unsigned long pfn = __phys_to_pfn(phys);
+
+	do {
+		/* clear all the bits except the pfn, then apply the prot */
+		set_pte(pte, pfn_pte(pfn, prot));
+		pte++;
+		pfn++;
+		addr += PAGE_SIZE;
+	} while (addr != end);
+}
+
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
-				  unsigned long end, unsigned long pfn,
+				  unsigned long end, phys_addr_t phys,
 				  pgprot_t prot,
 				  void *(*alloc)(unsigned long size))
 {
 	pte_t *pte;
+	unsigned long next;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
 		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -104,10 +147,31 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	BUG_ON(pmd_bad(*pmd));
 
 	pte = pte_offset_kernel(pmd, addr);
-	do {
-		set_pte(pte, pfn_pte(pfn, prot));
-		pfn++;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	if (!CONFIG_ARM64_CONT_PTE)
+		__populate_init_pte(pte, addr, end, phys, prot);
+	else
+	    do {
+		next = min(end, (addr + CONT_SIZE) & CONT_MASK);
+		if (((addr | next | phys) & ~CONT_MASK) == 0) {
+			/* a block of CONT_PTES  */
+			__populate_init_pte(pte, addr, next, phys,
+					    prot | __pgprot(PTE_CONT));
+		} else {
+			/*
+			 * If the range being split is already inside a
+			 * contiguous range but this PTE isn't going to be
+			 * contiguous, then we want to unmark the adjacent
+			 * ranges, then update the portion of the range we
+			 * are interested in.
+			 */
+			 clear_cont_pte_range(pte, addr);
+			 __populate_init_pte(pte, addr, next, phys, prot);
+		}
+
+		pte += (next - addr) >> PAGE_SHIFT;
+		phys += next - addr;
+		addr = next;
+	} while (addr != end);
 }
 
 void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -168,8 +232,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 				}
 			}
 		} else {
-			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot, alloc);
+			alloc_init_pte(pmd, addr, next, phys, prot, alloc);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
-- 
2.4.3

Thread overview: 16+ messages
2015-10-07 17:00 [PATCHv3 0/7] arm64: Use contiguous PTE bit for the kernel linear mapping Jeremy Linton
2015-10-07 17:00 ` [PATCH 1/7] arm64: Add contiguous page flag shifts and constants Jeremy Linton
2015-10-07 17:00 ` [PATCH 2/7] arm64: PTE/PMD contiguous bit definition Jeremy Linton
2015-10-07 17:00 ` [PATCH 3/7] arm64: Macros to check/set/unset the contiguous bit Jeremy Linton
2015-10-07 17:00 ` [PATCH 4/7] arm64: Default kernel pages should be contiguous Jeremy Linton
2015-10-07 17:00 ` [PATCH 5/7] arm64: Make the kernel page dump utility aware of the CONT bit Jeremy Linton
2015-10-07 17:00 ` [PATCH 6/7] arm64: Add a kernel Kconfig option to enable contiguous PTE marking Jeremy Linton
2015-10-07 17:00 ` Jeremy Linton [this message]
2015-10-09 12:50 ` [PATCHv3 0/7] arm64: Use contiguous PTE bit for the kernel linear mapping Catalin Marinas
2015-10-09 15:00   ` Jeremy Linton
2015-11-17 15:37     ` Jeremy Linton
2015-11-18 11:01       ` Will Deacon
2015-11-18 14:10         ` Jeremy Linton
  -- strict thread matches above, loose matches on Subject: below --
2015-09-16 19:02 [PATCH 0/7] arm64: Use contigous " Jeremy Linton
2015-09-16 19:03 ` [PATCH 7/7] arm64: Mark kernel page ranges contiguous Jeremy Linton
2015-09-17 17:23   ` Steve Capper
2015-09-17 17:33     ` Jeremy Linton
