From: Yinghai Lu <yinghai@kernel.org>
To: Thomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@elte.hu>,
"H. Peter Anvin" <hpa@zytor.com>, Jacob Shin <jacob.shin@amd.com>,
Tejun Heo <tj@kernel.org>
Cc: linux-kernel@vger.kernel.org, Yinghai Lu <yinghai@kernel.org>
Subject: [PATCH 01/13] x86, mm: Add global page_size_mask and probe one time only
Date: Sun, 30 Sep 2012 00:57:12 -0700
Message-ID: <1348991844-12285-2-git-send-email-yinghai@kernel.org>
In-Reply-To: <1348991844-12285-1-git-send-email-yinghai@kernel.org>
Currently we pass use_gbpages and use_pse around when calculating the page
table size. Later we will need to calculate the page table size for every RAM
range, which means those calculations would be repeated several times.

That information is the same for all RAM ranges, so it can be stored in
page_size_mask and probed only once.

Move the probing code out of init_memory_mapping into a separate function,
probe_page_size_mask(), and call it once before any init_memory_mapping call.
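
In effect the flow becomes "probe once, then map any number of ranges". A
minimal, self-contained C sketch of that idea (not the kernel code itself:
cpu_supports_pse(), gbpages_enabled() and map_range() below are illustrative
stand-ins for cpu_has_pse, direct_gbpages and the real mapping path):

	/* Conceptual sketch: capabilities go into one global mask,
	 * and every later mapping pass only tests the mask. */
	#include <stdio.h>

	#define PG_LEVEL_2M 1   /* bit positions mirror the kernel's enum */
	#define PG_LEVEL_1G 2

	static unsigned int page_size_mask;   /* filled once, read many times */

	static int cpu_supports_pse(void) { return 1; }  /* stand-in for cpu_has_pse */
	static int gbpages_enabled(void)  { return 0; }  /* stand-in for direct_gbpages */

	static void probe_page_size_mask(void)
	{
		if (gbpages_enabled())
			page_size_mask |= 1 << PG_LEVEL_1G;
		if (cpu_supports_pse())
			page_size_mask |= 1 << PG_LEVEL_2M;
	}

	static void map_range(unsigned long start, unsigned long end)
	{
		/* Each range consults the shared mask; nothing is re-probed. */
		printf("map [%#lx-%#lx] 2M=%d 1G=%d\n", start, end,
		       !!(page_size_mask & (1 << PG_LEVEL_2M)),
		       !!(page_size_mask & (1 << PG_LEVEL_1G)));
	}

	int main(void)
	{
		probe_page_size_mask();              /* once, before any mapping */
		map_range(0, 0x37000000UL);          /* then per-range mappings  */
		map_range(0x100000000UL, 0x140000000UL);
		return 0;
	}
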
Suggested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
---
arch/x86/include/asm/pgtable.h | 1 +
arch/x86/kernel/setup.c | 1 +
arch/x86/mm/init.c | 66 +++++++++++++++++++---------------------
3 files changed, 33 insertions(+), 35 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 402704f..c6f5779 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -598,6 +598,7 @@ static inline int pgd_none(pgd_t pgd)
#ifndef __ASSEMBLY__
extern int direct_gbpages;
+void probe_page_size_mask(void);
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 4f16547..20581d7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -912,6 +912,7 @@ void __init setup_arch(char **cmdline_p)
setup_real_mode();
init_gbpages();
+ probe_page_size_mask();
/* max_pfn_mapped is updated here */
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a9..7903d54 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -35,8 +35,10 @@ struct map_range {
unsigned page_size_mask;
};
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
- int use_pse, int use_gbpages)
+static int page_size_mask;
+
+static void __init find_early_table_space(struct map_range *mr,
+ unsigned long end)
{
unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
phys_addr_t base;
@@ -44,7 +46,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
- if (use_gbpages) {
+ if (page_size_mask & (1 << PG_LEVEL_1G)) {
unsigned long extra;
extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
@@ -54,7 +56,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
- if (use_pse) {
+ if (page_size_mask & (1 << PG_LEVEL_2M)) {
unsigned long extra;
extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -90,6 +92,30 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
(pgt_buf_top << PAGE_SHIFT) - 1);
}
+void probe_page_size_mask(void)
+{
+#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
+ /*
+ * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+ * This will simplify cpa(), which otherwise needs to support splitting
+ * large pages into small in interrupt context, etc.
+ */
+ if (direct_gbpages)
+ page_size_mask |= 1 << PG_LEVEL_1G;
+ if (cpu_has_pse)
+ page_size_mask |= 1 << PG_LEVEL_2M;
+#endif
+
+ /* Enable PSE if available */
+ if (cpu_has_pse)
+ set_in_cr4(X86_CR4_PSE);
+
+ /* Enable PGE if available */
+ if (cpu_has_pge) {
+ set_in_cr4(X86_CR4_PGE);
+ __supported_pte_mask |= _PAGE_GLOBAL;
+ }
+}
void __init native_pagetable_reserve(u64 start, u64 end)
{
memblock_reserve(start, end - start);
@@ -125,45 +151,15 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
unsigned long __init_refok init_memory_mapping(unsigned long start,
unsigned long end)
{
- unsigned long page_size_mask = 0;
unsigned long start_pfn, end_pfn;
unsigned long ret = 0;
unsigned long pos;
-
struct map_range mr[NR_RANGE_MR];
int nr_range, i;
- int use_pse, use_gbpages;
printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
start, end - 1);
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
- /*
- * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
- * This will simplify cpa(), which otherwise needs to support splitting
- * large pages into small in interrupt context, etc.
- */
- use_pse = use_gbpages = 0;
-#else
- use_pse = cpu_has_pse;
- use_gbpages = direct_gbpages;
-#endif
-
- /* Enable PSE if available */
- if (cpu_has_pse)
- set_in_cr4(X86_CR4_PSE);
-
- /* Enable PGE if available */
- if (cpu_has_pge) {
- set_in_cr4(X86_CR4_PGE);
- __supported_pte_mask |= _PAGE_GLOBAL;
- }
-
- if (use_gbpages)
- page_size_mask |= 1 << PG_LEVEL_1G;
- if (use_pse)
- page_size_mask |= 1 << PG_LEVEL_2M;
-
memset(mr, 0, sizeof(mr));
nr_range = 0;
@@ -267,7 +263,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
* nodes are discovered.
*/
if (!after_bootmem)
- find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+ find_early_table_space(&mr[0], end);
for (i = 0; i < nr_range; i++)
ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
--
1.7.7