public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Vivek Goyal <vgoyal@in.ibm.com>
To: linux kernel mailing list <linux-kernel@vger.kernel.org>
Cc: Reloc Kernel List <fastboot@lists.osdl.org>,
	ebiederm@xmission.com, akpm@linux-foundation.org, ak@suse.de,
	hpa@zytor.com, magnus.damm@gmail.com, lwang@redhat.com,
	dzickus@redhat.com, pavel@suse.cz, rjw@sisk.pl
Subject: [PATCH 2/20] x86_64: Kill temp boot pmds
Date: Wed, 7 Mar 2007 12:30:59 +0530	[thread overview]
Message-ID: <20070307070059.GC23412@in.ibm.com> (raw)
In-Reply-To: <20070307065703.GA23412@in.ibm.com>



Early in the boot process we need the ability to set
up temporary mappings, before our normal mechanisms are
initialized.  Currently this is used to map pages that
are part of the page tables we are building and pages
during the dmi scan.

The core problem is that we are using the user portion of
the page tables to implement this.  This means that while
this mechanism is active we cannot catch NULL pointer dereferences,
and we deviate from the normal ways of handling things.

In this patch I modify early_ioremap to map pages into
the kernel portion of address space, roughly where
we will later put modules, and I make the discovery of
which addresses we can use dynamic, which removes all
kinds of static limits and removes the dependencies
on implementation details between different parts of the code.

Now alloc_low_page() and unmap_low_page() use
early_ioremap() and early_iounmap() to allocate/map and
unmap a page.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
---

 arch/x86_64/kernel/head.S |    3 -
 arch/x86_64/mm/init.c     |  100 ++++++++++++++++++++--------------------------
 2 files changed, 45 insertions(+), 58 deletions(-)

diff -puN arch/x86_64/kernel/head.S~x86_64-Kill-temp_boot_pmds arch/x86_64/kernel/head.S
--- linux-2.6.21-rc2-reloc/arch/x86_64/kernel/head.S~x86_64-Kill-temp_boot_pmds	2007-03-07 01:21:26.000000000 +0530
+++ linux-2.6.21-rc2-reloc-root/arch/x86_64/kernel/head.S	2007-03-07 01:21:26.000000000 +0530
@@ -288,9 +288,6 @@ NEXT_PAGE(level2_ident_pgt)
 	.quad	i << 21 | 0x083
 	i = i + 1
 	.endr
-	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
-	.globl temp_boot_pmds
-temp_boot_pmds:
 	.fill	492,8,0
 	
 NEXT_PAGE(level2_kernel_pgt)
diff -puN arch/x86_64/mm/init.c~x86_64-Kill-temp_boot_pmds arch/x86_64/mm/init.c
--- linux-2.6.21-rc2-reloc/arch/x86_64/mm/init.c~x86_64-Kill-temp_boot_pmds	2007-03-07 01:21:26.000000000 +0530
+++ linux-2.6.21-rc2-reloc-root/arch/x86_64/mm/init.c	2007-03-07 01:21:26.000000000 +0530
@@ -167,23 +167,9 @@ __set_fixmap (enum fixed_addresses idx, 
 
 unsigned long __initdata table_start, table_end; 
 
-extern pmd_t temp_boot_pmds[]; 
-
-static  struct temp_map { 
-	pmd_t *pmd;
-	void  *address; 
-	int    allocated; 
-} temp_mappings[] __initdata = { 
-	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
-	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) }, 
-	{}
-}; 
-
-static __meminit void *alloc_low_page(int *index, unsigned long *phys)
+static __meminit void *alloc_low_page(unsigned long *phys)
 { 
-	struct temp_map *ti;
-	int i; 
-	unsigned long pfn = table_end++, paddr; 
+	unsigned long pfn = table_end++;
 	void *adr;
 
 	if (after_bootmem) {
@@ -194,57 +180,63 @@ static __meminit void *alloc_low_page(in
 
 	if (pfn >= end_pfn) 
 		panic("alloc_low_page: ran out of memory"); 
-	for (i = 0; temp_mappings[i].allocated; i++) {
-		if (!temp_mappings[i].pmd) 
-			panic("alloc_low_page: ran out of temp mappings"); 
-	} 
-	ti = &temp_mappings[i];
-	paddr = (pfn << PAGE_SHIFT) & PMD_MASK; 
-	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE)); 
-	ti->allocated = 1; 
-	__flush_tlb(); 	       
-	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK); 
+
+	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
 	memset(adr, 0, PAGE_SIZE);
-	*index = i; 
-	*phys  = pfn * PAGE_SIZE;  
-	return adr; 
-} 
+	*phys  = pfn * PAGE_SIZE;
+	return adr;
+}
 
-static __meminit void unmap_low_page(int i)
+static __meminit void unmap_low_page(void *adr)
 { 
-	struct temp_map *ti;
 
 	if (after_bootmem)
 		return;
 
-	ti = &temp_mappings[i];
-	set_pmd(ti->pmd, __pmd(0));
-	ti->allocated = 0; 
+	early_iounmap(adr, PAGE_SIZE);
 } 
 
 /* Must run before zap_low_mappings */
 __init void *early_ioremap(unsigned long addr, unsigned long size)
 {
-	unsigned long map = round_down(addr, LARGE_PAGE_SIZE); 
-
-	/* actually usually some more */
-	if (size >= LARGE_PAGE_SIZE) { 
-		return NULL;
+	unsigned long vaddr;
+	pmd_t *pmd, *last_pmd;
+	int i, pmds;
+
+	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+	vaddr = __START_KERNEL_map;
+	pmd = level2_kernel_pgt;
+	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
+	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
+		for (i = 0; i < pmds; i++) {
+			if (pmd_present(pmd[i]))
+				goto next;
+		}
+		vaddr += addr & ~PMD_MASK;
+		addr &= PMD_MASK;
+		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
+			set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
+		__flush_tlb();
+		return (void *)vaddr;
+	next:
+		;
 	}
-	set_pmd(temp_mappings[0].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
-	map += LARGE_PAGE_SIZE;
-	set_pmd(temp_mappings[1].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
-	__flush_tlb();
-	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
+	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
+	return NULL;
 }
 
 /* To avoid virtual aliases later */
 __init void early_iounmap(void *addr, unsigned long size)
 {
-	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
-		printk("early_iounmap: bad address %p\n", addr);
-	set_pmd(temp_mappings[0].pmd, __pmd(0));
-	set_pmd(temp_mappings[1].pmd, __pmd(0));
+	unsigned long vaddr;
+	pmd_t *pmd;
+	int i, pmds;
+
+	vaddr = (unsigned long)addr;
+	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+	pmd = level2_kernel_pgt + pmd_index(vaddr);
+	for (i = 0; i < pmds; i++)
+		pmd_clear(pmd + i);
 	__flush_tlb();
 }
 
@@ -289,7 +281,6 @@ static void __meminit phys_pud_init(pud_
 
 
 	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
-		int map; 
 		unsigned long pmd_phys;
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
@@ -307,12 +298,12 @@ static void __meminit phys_pud_init(pud_
 			continue;
 		}
 
-		pmd = alloc_low_page(&map, &pmd_phys);
+		pmd = alloc_low_page(&pmd_phys);
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
 		phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
-		unmap_low_page(map);
+		unmap_low_page(pmd);
 	}
 	__flush_tlb();
 } 
@@ -364,7 +355,6 @@ void __meminit init_memory_mapping(unsig
 	end = (unsigned long)__va(end);
 
 	for (; start < end; start = next) {
-		int map;
 		unsigned long pud_phys; 
 		pgd_t *pgd = pgd_offset_k(start);
 		pud_t *pud;
@@ -372,7 +362,7 @@ void __meminit init_memory_mapping(unsig
 		if (after_bootmem)
 			pud = pud_offset(pgd, start & PGDIR_MASK);
 		else
-			pud = alloc_low_page(&map, &pud_phys);
+			pud = alloc_low_page(&pud_phys);
 
 		next = start + PGDIR_SIZE;
 		if (next > end) 
@@ -380,7 +370,7 @@ void __meminit init_memory_mapping(unsig
 		phys_pud_init(pud, __pa(start), __pa(next));
 		if (!after_bootmem)
 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
-		unmap_low_page(map);   
+		unmap_low_page(pud);
 	} 
 
 	if (!after_bootmem)
_

  parent reply	other threads:[~2007-03-07  7:32 UTC|newest]

Thread overview: 55+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-03-07  6:57 [PATCH 0/20] x86_64 Relocatable bzImage support (V4) Vivek Goyal
2007-03-07  6:59 ` [PATCH 1/20] x86_64: Assembly safe page.h and pgtable.h Vivek Goyal
2007-03-07 19:24   ` Sam Ravnborg
2007-03-08  6:01     ` Vivek Goyal
2007-03-08  6:16       ` Eric W. Biederman
2007-03-07  7:00 ` Vivek Goyal [this message]
2007-03-07  7:02 ` [PATCH 3/20] x86_64: Clean up the early boot page table Vivek Goyal
2007-03-07  7:03 ` [PATCH 4/20] x86_64: Fix early printk to use standard ISA mapping Vivek Goyal
2007-03-07  7:04 ` [PATCH 5/20] x86_64: modify copy_bootdata to use virtual addresses Vivek Goyal
2007-03-07  7:06 ` [PATCH 6/20] x86_64: cleanup segments Vivek Goyal
2007-03-07  7:08 ` [PATCH 7/20] x86_64: Add EFER to the register set saved by save_processor_state Vivek Goyal
2007-03-07  7:09 ` [PATCH 8/20] x86_64: 64bit PIC SMP trampoline Vivek Goyal
2007-03-07  7:10 ` [PATCH 9/20] x86_64: Get rid of dead code in suspend resume Vivek Goyal
2007-03-07  7:12 ` [PATCH 10/20] x86_64: wakeup.S rename registers to reflect right names Vivek Goyal
2007-03-07 22:30   ` Pavel Machek
2007-03-07  7:13 ` [PATCH 11/20] x86_64: wakeup.S misc cleanups Vivek Goyal
2007-03-07 22:40   ` Pavel Machek
2007-03-08  4:25     ` Vivek Goyal
2007-03-07 22:41   ` Pavel Machek
2007-03-08  4:29     ` Vivek Goyal
2007-03-08 11:43       ` Pavel Machek
2007-03-08 16:45         ` [Fastboot] " Lombard, David N
2007-03-07  7:14 ` [PATCH 12/20] x86_64: 64bit ACPI wakeup trampoline Vivek Goyal
2007-03-07 22:45   ` Pavel Machek
2007-03-07 22:57     ` [Fastboot] " Bernhard Walle
2007-03-08  4:58     ` Vivek Goyal
2007-03-08 11:44       ` Pavel Machek
2007-03-07  7:16 ` [PATCH 13/20] x86_64: Modify discover_ebda to use virtual addresses Vivek Goyal
2007-03-07  7:17 ` [PATCH 14/20] x86_64: Remove the identity mapping as early as possible Vivek Goyal
2007-03-07  7:18 ` [PATCH 15/20] Move swsusp __pa() dependent code to arch portion Vivek Goyal
2007-03-07 22:47   ` Pavel Machek
2007-03-08  5:34     ` Vivek Goyal
2007-03-08 11:47       ` Pavel Machek
2007-03-07  7:20 ` [PATCH 16/20] swsusp: do not use virt_to_page on kernel data address Vivek Goyal
2007-03-07 22:49   ` Pavel Machek
2007-03-08  5:17     ` Vivek Goyal
2007-03-08 11:47       ` Pavel Machek
2007-03-07 22:50   ` Pavel Machek
2007-03-07 23:15     ` Nigel Cunningham
2007-03-08  5:04     ` Vivek Goyal
2007-03-08 11:44       ` Pavel Machek
2007-03-07  7:21 ` [PATCH 17/20] x86_64: __pa and __pa_symbol address space separation Vivek Goyal
2007-03-07  7:22 ` [PATCH 18/20] x86_64: Relocatable Kernel Support Vivek Goyal
2007-03-07  7:24 ` [PATCH 19/20] x86_64: Extend bzImage protocol for relocatable bzImage Vivek Goyal
2007-03-07  7:25 ` [PATCH 20/20] x86_64: Move cpu verification code to common file Vivek Goyal
2007-03-07 15:07 ` [PATCH 0/20] x86_64 Relocatable bzImage support (V4) Arjan van de Ven
2007-03-07 19:08   ` Eric W. Biederman
2007-03-07 20:49   ` Nigel Cunningham
2007-03-07 23:15     ` Nigel Cunningham
2007-03-08  4:40       ` Vivek Goyal
2007-03-08  8:07         ` Nigel Cunningham
2007-03-08  8:27           ` Vivek Goyal
2007-03-08  7:48       ` Vivek Goyal
2007-03-08  3:36   ` Vivek Goyal
2007-03-14 23:10 ` Andi Kleen

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20070307070059.GC23412@in.ibm.com \
    --to=vgoyal@in.ibm.com \
    --cc=ak@suse.de \
    --cc=akpm@linux-foundation.org \
    --cc=dzickus@redhat.com \
    --cc=ebiederm@xmission.com \
    --cc=fastboot@lists.osdl.org \
    --cc=hpa@zytor.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lwang@redhat.com \
    --cc=magnus.damm@gmail.com \
    --cc=pavel@suse.cz \
    --cc=rjw@sisk.pl \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox