linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] x86: introduce page_size_mask for 64bit
@ 2008-07-08  8:41 Yinghai Lu
  2008-07-08  8:43 ` [PATCH] x86: not overmap than end in init_memory_mapping - 64bit Yinghai Lu
  2008-07-09  7:38 ` [PATCH] x86: introduce page_size_mask for 64bit Ingo Molnar
  0 siblings, 2 replies; 84+ messages in thread
From: Yinghai Lu @ 2008-07-08  8:41 UTC (permalink / raw)
  To: Ingo Molnar, Thomas Gleixner, H. Peter Anvin; +Cc: LKML


prepare for overmapped patch

also printout last_map_addr together with end

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>

---
 arch/x86/mm/init_64.c |   98 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 63 insertions(+), 35 deletions(-)

Index: linux-2.6/arch/x86/mm/init_64.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/init_64.c
+++ linux-2.6/arch/x86/mm/init_64.c
@@ -300,7 +300,8 @@ phys_pte_update(pmd_t *pmd, unsigned lon
 }
 
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+			 unsigned long page_size_mask)
 {
 	unsigned long pages = 0;
 
@@ -325,7 +326,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
 			continue;
 		}
 
-		if (cpu_has_pse) {
+		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -343,20 +344,22 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
 }
 
 static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
+			 unsigned long page_size_mask)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
 	spin_lock(&init_mm.page_table_lock);
-	last_map_addr = phys_pmd_init(pmd, address, end);
+	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
 	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }
 
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+			 unsigned long page_size_mask)
 {
 	unsigned long pages = 0;
 	unsigned long last_map_addr = end;
@@ -378,11 +381,12 @@ phys_pud_init(pud_t *pud_page, unsigned
 
 		if (pud_val(*pud)) {
 			if (!pud_large(*pud))
-				last_map_addr = phys_pmd_update(pud, addr, end);
+				last_map_addr = phys_pmd_update(pud, addr, end,
+							 page_size_mask);
 			continue;
 		}
 
-		if (direct_gbpages) {
+		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -393,7 +397,7 @@ phys_pud_init(pud_t *pud_page, unsigned
 		pmd = alloc_low_page(&pmd_phys);
 
 		spin_lock(&init_mm.page_table_lock);
-		last_map_addr = phys_pmd_init(pmd, addr, end);
+		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
@@ -406,13 +410,14 @@ phys_pud_init(pud_t *pud_page, unsigned
 }
 
 static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end)
+phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
+		 unsigned long page_size_mask)
 {
 	pud_t *pud;
 
 	pud = (pud_t *)pgd_page_vaddr(*pgd);
 
-	return phys_pud_init(pud, addr, end);
+	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
 static void __init find_early_table_space(unsigned long end)
@@ -582,29 +587,12 @@ static void __init early_memtest(unsigne
 }
 #endif
 
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+						unsigned long end,
+						unsigned long page_size_mask)
 {
-	unsigned long next, last_map_addr = end;
-	unsigned long start_phys = start, end_phys = end;
 
-	printk(KERN_INFO "init_memory_mapping\n");
-
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem) {
-		init_gbpages();
-		find_early_table_space(end);
-	}
+	unsigned long next, last_map_addr = end;
 
 	start = (unsigned long)__va(start);
 	end = (unsigned long)__va(end);
@@ -619,7 +607,8 @@ unsigned long __init_refok init_memory_m
 			next = end;
 
 		if (pgd_val(*pgd)) {
-			last_map_addr = phys_pud_update(pgd, __pa(start), __pa(end));
+			last_map_addr = phys_pud_update(pgd, __pa(start),
+						 __pa(end), page_size_mask);
 			continue;
 		}
 
@@ -628,22 +617,61 @@ unsigned long __init_refok init_memory_m
 		else
 			pud = alloc_low_page(&pud_phys);
 
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
+		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+						 page_size_mask);
 		unmap_low_page(pud);
 		pgd_populate(&init_mm, pgd_offset_k(start),
 			     __va(pud_phys));
 	}
 
+	return last_map_addr;
+}
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+					       unsigned long end)
+{
+	unsigned long last_map_addr;
+	unsigned long page_size_mask = 0;
+
+	printk(KERN_INFO "init_memory_mapping\n");
+
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
+	 */
+	if (!after_bootmem) {
+		init_gbpages();
+		find_early_table_space(end);
+	}
+
+	if (direct_gbpages)
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	if (cpu_has_pse)
+		page_size_mask |= 1 << PG_LEVEL_2M;
+
+	last_map_addr = kernel_physical_mapping_init(start, end,
+							 page_size_mask);
+
 	if (!after_bootmem)
 		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 
-	if (!after_bootmem)
+	if (!after_bootmem && table_end > table_start)
 		reserve_early(table_start << PAGE_SHIFT,
 				 table_end << PAGE_SHIFT, "PGTABLE");
 
+	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
+			 last_map_addr, end);
+
 	if (!after_bootmem)
-		early_memtest(start_phys, end_phys);
+		early_memtest(start, end);
 
 	return last_map_addr >> PAGE_SHIFT;
 }

^ permalink raw reply	[flat|nested] 84+ messages in thread

end of thread, other threads:[~2008-07-18 17:08 UTC | newest]

Thread overview: 84+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-07-08  8:41 [PATCH] x86: introduce page_size_mask for 64bit Yinghai Lu
2008-07-08  8:43 ` [PATCH] x86: not overmap than end in init_memory_mapping - 64bit Yinghai Lu
2008-07-09  7:38   ` Ingo Molnar
2008-07-09  8:34     ` Ingo Molnar
2008-07-09  8:37       ` Yinghai Lu
2008-07-09  8:46         ` Ingo Molnar
2008-07-09  8:58           ` Yinghai Lu
2008-07-09 10:01           ` Yinghai Lu
2008-07-09 10:30             ` Ingo Molnar
2008-07-09  8:45       ` Ingo Molnar
2008-07-10  3:15   ` [PATCh] x86: overmapped fix when 4K pages on tail " Yinghai Lu
2008-07-10  3:16     ` [PATCH] x86: merge __acpi_map_table Yinghai Lu
2008-07-10  3:17       ` [PATCH] x86: make e820_end return end_of_ram again for 64bit Yinghai Lu
2008-07-10  7:00         ` Ingo Molnar
2008-07-10 11:17         ` [PATCH] x86: e820 remove the range instead of update it to reserved Yinghai Lu
2008-07-11  8:20           ` Ingo Molnar
2008-07-11  3:36         ` [PATCH] x86: save slit Yinghai Lu
2008-07-11  8:22           ` Ingo Molnar
2008-07-11  3:38         ` [PATCH] x86: introduce max_low_pfn_mapped for 64bit Yinghai Lu
2008-07-11  8:26           ` Ingo Molnar
2008-07-11  8:39             ` Yinghai Lu
2008-07-11  8:51               ` Ingo Molnar
2008-07-12  1:41           ` [PATCH] x86: let 32bit use apic_ops too Yinghai Lu
2008-07-12  1:43             ` [PATCH] x86: mach_apicdef.h need to include before smp.h Yinghai Lu
2008-07-12  1:44               ` [PATCH] x86: make read_apic_id return final apicid Yinghai Lu
2008-07-12  8:01                 ` [PATCH] x86: make 64bit have get_apic_id Yinghai Lu
2008-07-13  6:28                   ` Ingo Molnar
2008-07-13  6:59                     ` Ingo Molnar
2008-07-13  7:05                       ` Yinghai Lu
2008-07-13  9:23                         ` Ingo Molnar
2008-07-13  9:28                           ` Ingo Molnar
2008-07-13 16:15                             ` Suresh Siddha
2008-07-13  1:19                 ` [PATCH] x86: make read_apic_id return final apicid Suresh Siddha
2008-07-13  1:08             ` [PATCH] x86: let 32bit use apic_ops too Suresh Siddha
2008-07-13  2:04               ` Yinghai Lu
2008-07-13 16:28                 ` Suresh Siddha
2008-07-13 16:51                   ` Maciej W. Rozycki
2008-07-13 17:16                     ` Cyrill Gorcunov
2008-07-13 23:46                       ` Maciej W. Rozycki
2008-07-14 16:48                         ` Cyrill Gorcunov
2008-07-14 17:20                           ` Maciej W. Rozycki
2008-07-14 18:09                             ` Cyrill Gorcunov
2008-07-14 18:24                               ` Maciej W. Rozycki
2008-07-14 18:32                                 ` Cyrill Gorcunov
2008-07-13  1:43             ` Maciej W. Rozycki
2008-07-13  1:45               ` Yinghai Lu
2008-07-13  1:54                 ` Maciej W. Rozycki
2008-07-13 16:43                   ` Suresh Siddha
2008-07-13 17:05                     ` Maciej W. Rozycki
2008-07-14  5:19             ` [PATCH] x86: let 32bit use apic_ops too - fix Yinghai Lu
2008-07-14  7:12               ` Ingo Molnar
2008-07-14 16:49                 ` Suresh Siddha
2008-07-14 17:00                   ` Yinghai Lu
2008-07-14 18:03                     ` Suresh Siddha
2008-07-18 17:06                   ` Ingo Molnar
2008-07-15 17:33               ` Suresh Siddha
2008-07-15 18:10                 ` Yinghai Lu
2008-07-15 18:27                   ` Suresh Siddha
2008-07-18 17:07                 ` Ingo Molnar
2008-07-12 21:30           ` [PATCH] x86: max_low_pfn_mapped fix #1 Yinghai Lu
2008-07-13  9:45             ` Ingo Molnar
2008-07-12 21:31           ` [PATCH] x86: max_low_pfn_mapped fix #2 Yinghai Lu
2008-07-12 21:32           ` [PATCH] x86: max_low_pfn_mapped fix #3 Yinghai Lu
2008-07-13 21:29             ` [PATCH] x86: max_low_pfn_mapped fix #4 Yinghai Lu
2008-07-13 21:30             ` [PATCH] x86: get x86_phys_bits early Yinghai Lu
2008-07-13 21:32             ` [PATCH] x86: make 64bit hpet_set_mapping to use ioremap too Yinghai Lu
2008-07-13 21:50               ` [PATCH] x86: make 64bit hpet_set_mapping to use ioremap too v2 Yinghai Lu
2008-07-10  6:54       ` [PATCH] x86: merge __acpi_map_table Ingo Molnar
2008-07-10  6:53     ` [PATCh] x86: overmapped fix when 4K pages on tail - 64bit Ingo Molnar
2008-07-10  6:57       ` Yinghai Lu
2008-07-10  7:20         ` Ingo Molnar
2008-07-10  7:32           ` Yinghai Lu
2008-07-10 14:16     ` Arjan van de Ven
2008-07-13 14:57       ` Andi Kleen
2008-07-13 15:33         ` Arjan van de Ven
2008-07-13 18:25           ` Andi Kleen
2008-07-13 18:17         ` Yinghai Lu
2008-07-13 18:48           ` Andi Kleen
2008-07-13 19:00             ` Yinghai Lu
2008-07-13 20:32             ` Ingo Molnar
2008-07-13 20:51               ` Andi Kleen
2008-07-14  0:04                 ` H. Peter Anvin
2008-07-14  6:39                   ` Andi Kleen
2008-07-09  7:38 ` [PATCH] x86: introduce page_size_mask for 64bit Ingo Molnar

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).