* [PATCH v2 1/3] x86/mm: Simplify memory mapping PFN calculation
@ 2012-07-15 11:04 Pekka Enberg
From: Pekka Enberg @ 2012-07-15 11:04 UTC
To: mingo; +Cc: yinghai, linux-kernel, x86, Pekka Enberg, Joe Perches, Tejun Heo
Introduce two new helper functions, addr_to_pmd_pfn() and
addr_to_pud_pfn(), to simplify the init_memory_mapping() code flow.
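For reference, the helpers round an address down to the containing
PMD-sized (or PUD-sized) region and express the result as a page frame
number; callers that want round-up behavior pass pos + (PMD_SIZE - 1),
as in the hunks below. A minimal userspace sketch, not part of the
patch, assuming the usual x86-64 values PAGE_SHIFT=12 and PMD_SHIFT=21:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */
#define PMD_SHIFT  21	/* 2 MiB PMD entries */

static unsigned long addr_to_pmd_pfn(unsigned long addr)
{
	/* Round addr down to a PMD boundary, then convert to a PFN. */
	return (addr >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
}

int main(void)
{
	/* 0x254000 lies in the 2 MiB region starting at 0x200000,
	 * so this prints 0x200 (= 0x200000 >> PAGE_SHIFT). */
	printf("%#lx\n", addr_to_pmd_pfn(0x254000UL));
	return 0;
}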
Cc: Joe Perches <joe@perches.com>
Cc: Tejun Heo <tj@kernel.org>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
arch/x86/mm/init.c | 38 +++++++++++++++++++++-----------------
1 files changed, 21 insertions(+), 17 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index bc4e9d8..9eb53c2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -117,6 +117,16 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
return nr_range;
}
+static unsigned long addr_to_pmd_pfn(unsigned long addr)
+{
+ return (addr >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+}
+
+static unsigned long addr_to_pud_pfn(unsigned long addr)
+{
+ return (addr >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+}
+
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
@@ -180,11 +190,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
if (pos == 0)
end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
else
- end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
- << (PMD_SHIFT - PAGE_SHIFT);
+ end_pfn = addr_to_pmd_pfn(pos + (PMD_SIZE - 1));
#else /* CONFIG_X86_64 */
- end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
- << (PMD_SHIFT - PAGE_SHIFT);
+ end_pfn = addr_to_pmd_pfn(pos + (PMD_SIZE - 1));
#endif
if (end_pfn > (end >> PAGE_SHIFT))
end_pfn = end >> PAGE_SHIFT;
@@ -194,15 +202,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
}
/* big page (2M) range */
- start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
- << (PMD_SHIFT - PAGE_SHIFT);
+ start_pfn = addr_to_pmd_pfn(pos + (PMD_SIZE - 1));
#ifdef CONFIG_X86_32
- end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+ end_pfn = addr_to_pmd_pfn(end);
#else /* CONFIG_X86_64 */
- end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
- << (PUD_SHIFT - PAGE_SHIFT);
- if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
- end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+ end_pfn = addr_to_pud_pfn(pos + (PUD_SIZE - 1));
+ if (end_pfn > addr_to_pmd_pfn(end))
+ end_pfn = addr_to_pmd_pfn(end);
#endif
if (start_pfn < end_pfn) {
@@ -213,9 +219,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
#ifdef CONFIG_X86_64
/* big page (1G) range */
- start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
- << (PUD_SHIFT - PAGE_SHIFT);
- end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+ start_pfn = addr_to_pud_pfn(pos + (PUD_SIZE - 1));
+ end_pfn = addr_to_pud_pfn(end);
if (start_pfn < end_pfn) {
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
page_size_mask &
@@ -224,9 +229,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
}
/* tail is not big page (1G) alignment */
- start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
- << (PMD_SHIFT - PAGE_SHIFT);
- end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+ start_pfn = addr_to_pmd_pfn(pos + (PMD_SIZE - 1));
+ end_pfn = addr_to_pmd_pfn(end);
if (start_pfn < end_pfn) {
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
page_size_mask & (1<<PG_LEVEL_2M));
--
1.7.7.6
* [PATCH v2 2/3] x86/mm: Simplify free_init_pages()
From: Pekka Enberg @ 2012-07-15 11:04 UTC
To: mingo; +Cc: yinghai, linux-kernel, x86, Pekka Enberg, Joe Perches, Tejun Heo
As a cleanup, separate the #ifdef'd code into a new helper function and
move the initial "addr" assignment into the for-loop construct.
Cc: Joe Perches <joe@perches.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
arch/x86/mm/init.c | 44 ++++++++++++++++++++++++--------------------
1 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 9eb53c2..4f863cc 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -332,35 +332,20 @@ int devmem_is_allowed(unsigned long pagenr)
return 0;
}
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
+static void __free_init_pages(char *what, unsigned long begin, unsigned long end)
{
- unsigned long addr;
- unsigned long begin_aligned, end_aligned;
-
- /* Make sure boundaries are page aligned */
- begin_aligned = PAGE_ALIGN(begin);
- end_aligned = end & PAGE_MASK;
-
- if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
- begin = begin_aligned;
- end = end_aligned;
- }
-
- if (begin >= end)
- return;
-
- addr = begin;
-
+#ifdef CONFIG_DEBUG_PAGEALLOC
/*
* If debugging page accesses then do not free this memory but
* mark them not present - any buggy init-section access will
* create a kernel page fault:
*/
-#ifdef CONFIG_DEBUG_PAGEALLOC
printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
begin, end - 1);
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
+ unsigned long addr;
+
/*
* We just marked the kernel text read only above, now that
* we are going to free part of that, we need to make that
@@ -371,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
- for (; addr < end; addr += PAGE_SIZE) {
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
@@ -381,6 +366,25 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
#endif
}
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
+{
+ unsigned long begin_aligned, end_aligned;
+
+ /* Make sure boundaries are page aligned */
+ begin_aligned = PAGE_ALIGN(begin);
+ end_aligned = end & PAGE_MASK;
+
+ if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+ begin = begin_aligned;
+ end = end_aligned;
+ }
+
+ if (begin >= end)
+ return;
+
+ __free_init_pages(what, begin, end);
+}
+
void free_initmem(void)
{
free_init_pages("unused kernel memory",
--
1.7.7.6
* [PATCH v2 3/3] x86/mm: Separate paging setup from memory mapping
From: Pekka Enberg @ 2012-07-15 11:04 UTC
To: mingo; +Cc: yinghai, linux-kernel, x86, Pekka Enberg, Joe Perches, Tejun Heo
Move PSE and PGE bit twiddling from init_memory_mapping() to a new
setup_paging() function to simplify the former function. The
init_memory_mapping() function is called later in the boot process by
gart_iommu_init(), efi_ioremap(), and arch_add_memory(), which have no
business whatsoever updating the CR4 register.
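For context, these are the two CR4 bits the moved code toggles (bit
values per the Intel SDM; the kernel's X86_CR4_* constants match):

/* CR4 bits set by setup_paging() when the CPU advertises the feature. */
#define X86_CR4_PSE 0x00000010	/* bit 4: 4 MiB pages in non-PAE 32-bit
				   paging */
#define X86_CR4_PGE 0x00000080	/* bit 7: global pages survive TLB flushes
				   on CR3 reload; paired with _PAGE_GLOBAL */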
Cc: Joe Perches <joe@perches.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
arch/x86/include/asm/page_types.h | 2 ++
arch/x86/kernel/setup.c | 2 ++
arch/x86/mm/init.c | 23 +++++++++++++----------
3 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index e21fdd1..529905e 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,6 +51,8 @@ static inline phys_addr_t get_max_mapped(void)
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}
+extern void setup_paging(void);
+
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 16be6dc..a883978 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -913,6 +913,8 @@ void __init setup_arch(char **cmdline_p)
init_gbpages();
+ setup_paging();
+
/* max_pfn_mapped is updated here */
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4f863cc..a972325 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -127,6 +127,19 @@ static unsigned long addr_to_pud_pfn(unsigned long addr)
return (addr >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
}
+void setup_paging(void)
+{
+ /* Enable PSE if available */
+ if (cpu_has_pse)
+ set_in_cr4(X86_CR4_PSE);
+
+ /* Enable PGE if available */
+ if (cpu_has_pge) {
+ set_in_cr4(X86_CR4_PGE);
+ __supported_pte_mask |= _PAGE_GLOBAL;
+ }
+}
+
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
@@ -159,16 +172,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
use_gbpages = direct_gbpages;
#endif
- /* Enable PSE if available */
- if (cpu_has_pse)
- set_in_cr4(X86_CR4_PSE);
-
- /* Enable PGE if available */
- if (cpu_has_pge) {
- set_in_cr4(X86_CR4_PGE);
- __supported_pte_mask |= _PAGE_GLOBAL;
- }
-
if (use_gbpages)
page_size_mask |= 1 << PG_LEVEL_1G;
if (use_pse)
--
1.7.7.6