From: Jon Tollefson <kniht@us.ibm.com>
To: linuxppc-dev <linuxppc-dev@ozlabs.org>,
	Linux Memory Management List <linux-mm@kvack.org>
Subject: [PATCH 2/2] powerpc: make 64K huge pages more reliable
Date: Tue, 27 Nov 2007 23:03:16 -0600
Message-ID: <474CF694.8040700@us.ibm.com>

This patch makes the 64K huge page option more reliable by using the
PMD for 64K huge pages when the base page size is 4K.  Instead of a
single 12-bit PTE index there is a 7-bit PMD index and a 5-bit PTE
index, while the PGD and PUD offsets remain 9 bits and 7 bits
respectively.  This lets the huge-page PTE table fit in one base page.
This patch must be applied on top of part 1.
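
As a standalone illustration of the arithmetic (a user-space sketch,
not kernel code; the shift values below are assumptions based on the
usual ppc64 4K-base-page layout rather than values quoted from the
kernel headers):

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4K base pages (assumed) */
#define PTE_INDEX_SIZE	9	/* assumed: 512 PTEs per page table */
#define PMD_INDEX_SIZE	7	/* assumed */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)	/* 21 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)	/* 28 */
#define HPAGE_SHIFT_64K	16	/* 64K huge pages */

int main(void)
{
	unsigned long pte_size = 8;	/* sizeof(pte_t) on ppc64 */

	/* Old scheme: the huge-PTE table hangs off the PUD, so it must
	 * cover everything below PUD_SHIFT in 64K steps. */
	unsigned int old_shift = PUD_SHIFT - HPAGE_SHIFT_64K;	/* 12 bits */

	/* New scheme: a normal 7-bit PMD level is kept and the huge-PTE
	 * table only covers what lies below PMD_SHIFT. */
	unsigned int new_shift = PMD_SHIFT - HPAGE_SHIFT_64K;	/* 5 bits */

	printf("old table: %2u bits -> %5lu bytes\n",
	       old_shift, pte_size << old_shift);	/* 32768 */
	printf("new table: %2u bits -> %5lu bytes\n",
	       new_shift, pte_size << new_shift);	/* 256 */
	return 0;
}

With 8-byte PTE entries the old 12-bit table would need 32K, which
cannot come from a single 4K base page; the new 5-bit table needs only
256 bytes and fits easily.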

Signed-off-by: Jon Tollefson <kniht@linux.vnet.ibm.com>
---

 hugetlbpage.c |   53 ++++++++++++++++++++++++++++++++++++++---------------
 1 files changed, 38 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f4632ad..c6df45b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -30,15 +30,11 @@
 #define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
-#else
-#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
-#endif
-#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
-#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)
+unsigned int hugepte_shift;
+#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
+#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)
 
-#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
+#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
 #define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
 #define HUGEPD_MASK		(~(HUGEPD_SIZE-1))
 
@@ -105,7 +101,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 			if (!pmd_none(*pm))
 				return hugepte_offset((hugepd_t *)pm, addr);
 #else
-			return hugepte_offset((hugepd_t *)pu, addr);
+			if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
+				pmd_t *pm;
+				pm = pmd_offset(pu, addr);
+				if (!pmd_none(*pm))
+					return hugepte_offset((hugepd_t *)pm, addr);
+			} else {
+				return hugepte_offset((hugepd_t *)pu, addr);
+			}
 #endif
 		}
 	}
@@ -133,7 +136,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 		if (pm)
 			hpdp = (hugepd_t *)pm;
 #else
-		hpdp = (hugepd_t *)pu;
+		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
+			pmd_t *pm;
+			pm = pmd_alloc(mm, pu, addr);
+			if (pm)
+				hpdp = (hugepd_t *)pm;
+		} else {
+			hpdp = (hugepd_t *)pu;
+		}
 #endif
 	}
 
@@ -161,7 +171,6 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 						 PGF_CACHENUM_MASK));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 				   unsigned long addr, unsigned long end,
 				   unsigned long floor, unsigned long ceiling)
@@ -194,7 +203,6 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	pud_clear(pud);
 	pmd_free_tlb(tlb, pmd);
 }
-#endif
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 				   unsigned long addr, unsigned long end,
@@ -213,9 +221,15 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			continue;
 		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 #else
-		if (pud_none(*pud))
-			continue;
-		free_hugepte_range(tlb, (hugepd_t *)pud);
+		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
+			if (pud_none_or_clear_bad(pud))
+				continue;
+			hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+		} else {
+			if (pud_none(*pud))
+				continue;
+			free_hugepte_range(tlb, (hugepd_t *)pud);
+		}
 #endif
 	} while (pud++, addr = next, addr != end);
 
@@ -538,6 +552,15 @@ void set_huge_psize(int psize)
 			mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
 		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
 		mmu_huge_psize = psize;
+#ifdef CONFIG_PPC_64K_PAGES
+		hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+#else
+		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+			hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+		else
+			hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
+#endif
+
 	} else
 		HPAGE_SHIFT = 0;
 }
