public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86: move init_gbpages() to setup_arch()
@ 2009-06-22 14:39 Pekka J Enberg
  2009-06-22 15:46 ` Yinghai Lu
  2009-06-23  8:51 ` [tip:x86/urgent] x86: Move " tip-bot for Pekka J Enberg
  0 siblings, 2 replies; 3+ messages in thread
From: Pekka J Enberg @ 2009-06-22 14:39 UTC (permalink / raw)
  To: mingo; +Cc: x86, yinghai, linux-kernel

From: Pekka Enberg <penberg@cs.helsinki.fi>

The init_gbpages() function is conditionally called from the init_memory_mapping()
function. There are two call-sites where this '!after_bootmem' condition can be
true: setup_arch() and mem_init() via pci_iommu_alloc().

Therefore, it's safe to move the call to init_gbpages() to setup_arch() as it's
always called before mem_init().

Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---
 arch/x86/kernel/setup.c |   16 ++++++++++++++++
 arch/x86/mm/init.c      |   17 -----------------
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index be5ae80..de2cab1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
 	return ret;
 }
 
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+	if (direct_gbpages && cpu_has_gbpages)
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+	else
+		direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
 static void __init reserve_brk(void)
 {
 	if (_brk_end > _brk_start)
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_brk();
 
+	init_gbpages();
+
 	/* max_pfn_mapped is updated here */
 	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e..47ce9a2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
 	return nr_range;
 }
 
-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages)
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
-		direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-	if (!after_bootmem)
-		init_gbpages();
-
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-- 
1.6.0.4


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] x86: move init_gbpages() to setup_arch()
  2009-06-22 14:39 [PATCH] x86: move init_gbpages() to setup_arch() Pekka J Enberg
@ 2009-06-22 15:46 ` Yinghai Lu
  2009-06-23  8:51 ` [tip:x86/urgent] x86: Move " tip-bot for Pekka J Enberg
  1 sibling, 0 replies; 3+ messages in thread
From: Yinghai Lu @ 2009-06-22 15:46 UTC (permalink / raw)
  To: Pekka J Enberg; +Cc: mingo, x86, linux-kernel

Pekka J Enberg wrote:
> From: Pekka Enberg <penberg@cs.helsinki.fi>
> 
> The init_gbpages() function is conditionally called from the init_memory_mapping()
> function. There are two call-sites where this '!after_bootmem' condition can be
> true: setup_arch() and mem_init() via pci_iommu_alloc().
> 
> Therefore, it's safe to move the call to init_gbpages() to setup_arch() as it's
> always called before mem_init().
> 
> Cc: Yinghai Lu <yinghai@kernel.org>
> Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
> ---
>  arch/x86/kernel/setup.c |   16 ++++++++++++++++
>  arch/x86/mm/init.c      |   17 -----------------
>  2 files changed, 16 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index be5ae80..de2cab1 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
>  	return ret;
>  }
>  
> +#ifdef CONFIG_X86_64
> +static void __init init_gbpages(void)
> +{
> +	if (direct_gbpages && cpu_has_gbpages)
> +		printk(KERN_INFO "Using GB pages for direct mapping\n");
> +	else
> +		direct_gbpages = 0;
> +}
> +#else
> +static inline void init_gbpages(void)
> +{
> +}
> +#endif
> +
>  static void __init reserve_brk(void)
>  {
>  	if (_brk_end > _brk_start)
> @@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)
>  
>  	reserve_brk();
>  
> +	init_gbpages();
> +
>  	/* max_pfn_mapped is updated here */
>  	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
>  	max_pfn_mapped = max_low_pfn_mapped;
> diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
> index f53b57e..47ce9a2 100644
> --- a/arch/x86/mm/init.c
> +++ b/arch/x86/mm/init.c
> @@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
>  	return nr_range;
>  }
>  
> -#ifdef CONFIG_X86_64
> -static void __init init_gbpages(void)
> -{
> -	if (direct_gbpages && cpu_has_gbpages)
> -		printk(KERN_INFO "Using GB pages for direct mapping\n");
> -	else
> -		direct_gbpages = 0;
> -}
> -#else
> -static inline void init_gbpages(void)
> -{
> -}
> -#endif
> -
>  /*
>   * Setup the direct mapping of the physical memory at PAGE_OFFSET.
>   * This runs before bootmem is initialized and gets pages directly from
> @@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
>  
>  	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
>  
> -	if (!after_bootmem)
> -		init_gbpages();
> -
>  #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
>  	/*
>  	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.


Acked-by: Yinghai Lu <yinghai@kernel.org>

YH

^ permalink raw reply	[flat|nested] 3+ messages in thread

* [tip:x86/urgent] x86: Move init_gbpages() to setup_arch()
  2009-06-22 14:39 [PATCH] x86: move init_gbpages() to setup_arch() Pekka J Enberg
  2009-06-22 15:46 ` Yinghai Lu
@ 2009-06-23  8:51 ` tip-bot for Pekka J Enberg
  1 sibling, 0 replies; 3+ messages in thread
From: tip-bot for Pekka J Enberg @ 2009-06-23  8:51 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, hpa, mingo, yinghai, penberg, tglx, mingo

Commit-ID:  854c879f5abf309ebd378bea1ee41acf4ddf7194
Gitweb:     http://git.kernel.org/tip/854c879f5abf309ebd378bea1ee41acf4ddf7194
Author:     Pekka J Enberg <penberg@cs.helsinki.fi>
AuthorDate: Mon, 22 Jun 2009 17:39:41 +0300
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 23 Jun 2009 10:33:32 +0200

x86: Move init_gbpages() to setup_arch()

The init_gbpages() function is conditionally called from the
init_memory_mapping() function. There are two call-sites where
this '!after_bootmem' condition can be true: setup_arch() and
mem_init() via pci_iommu_alloc().

Therefore, it's safe to move the call to init_gbpages() to
setup_arch() as it's always called before mem_init().

This removes an after_bootmem use - paving the way to remove
all uses of that state variable.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <Pine.LNX.4.64.0906221731210.19474@melkki.cs.Helsinki.FI>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 arch/x86/kernel/setup.c |   16 ++++++++++++++++
 arch/x86/mm/init.c      |   17 -----------------
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index be5ae80..de2cab1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
 	return ret;
 }
 
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+	if (direct_gbpages && cpu_has_gbpages)
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+	else
+		direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
 static void __init reserve_brk(void)
 {
 	if (_brk_end > _brk_start)
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_brk();
 
+	init_gbpages();
+
 	/* max_pfn_mapped is updated here */
 	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e..47ce9a2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
 	return nr_range;
 }
 
-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages)
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
-		direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-	if (!after_bootmem)
-		init_gbpages();
-
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.

^ permalink raw reply related	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2009-06-23  8:52 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-06-22 14:39 [PATCH] x86: move init_gbpages() to setup_arch() Pekka J Enberg
2009-06-22 15:46 ` Yinghai Lu
2009-06-23  8:51 ` [tip:x86/urgent] x86: Move " tip-bot for Pekka J Enberg

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox