* [PATCH 01/10] arm64/efi: use dynamically allocated space for EFI pgd
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
@ 2015-05-11 7:12 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 02/10] arm64: reduce ID map to a single page Ard Biesheuvel
` (9 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:12 UTC
To: linux-arm-kernel
Now that we populate the EFI page tables during an initcall, we
can allocate the root pgd dynamically instead of statically in .bss.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/kernel/efi.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 5369b4f96dd1..f3ba25621f7a 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -32,17 +32,15 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
struct efi_memory_map memmap;
static u64 efi_system_table;
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
-
static struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
- .pgd = efi_pgd,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
@@ -227,6 +225,10 @@ static bool __init efi_virtmap_init(void)
{
efi_memory_desc_t *md;
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ if (!efi_mm.pgd)
+ return false;
+
for_each_efi_memory_desc(&memmap, md) {
u64 paddr, npages, size;
pgprot_t prot;
@@ -313,7 +315,7 @@ static int __init arm64_enable_runtime_services(void)
return 0;
}
-early_initcall(arm64_enable_runtime_services);
+postcore_initcall(arm64_enable_runtime_services);
static int __init arm64_dmi_init(void)
{
--
1.9.1
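As a minimal sketch of the allocation pattern (illustrative only; the helper
name is made up and this is not part of the patch), dynamically setting up a
root pgd for a standalone mm with a graceful error path looks roughly like:

#include <linux/init.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>

/* Hypothetical helper -- a sketch of the pattern, not part of the patch. */
static bool __init example_pgd_setup(struct mm_struct *mm)
{
        mm->pgd = pgd_alloc(mm);        /* root pgd now comes from the allocator */
        if (!mm->pgd)
                return false;           /* caller can fall back gracefully */
        return true;
}

The trade-off of a dynamic allocation is that it can fail, which is why
efi_virtmap_init() gains an early "return false" path for the case where the
allocation does not succeed.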
* [PATCH 02/10] arm64: reduce ID map to a single page
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
2015-05-11 7:12 ` [PATCH 01/10] arm64/efi: use dynamically allocated space for EFI pgd Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 03/10] arm64: drop sleep_idmap_phys and clean up cpu_resume() Ard Biesheuvel
` (8 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
Commit ea8c2e112445 ("arm64: Extend the idmap to the whole kernel
image") changed the early page table code so that the entire kernel
Image is covered by the identity map. This allows functions that
need to enable or disable the MMU to reside anywhere in the kernel
Image.
However, this change has the unfortunate side effect that the Image
cannot cross a physical 512 MB alignment boundary anymore, since the
early page table code cannot deal with the Image crossing a /virtual/
512 MB alignment boundary.
So instead, reduce the ID map to a single page, which is populated by
the contents of the .idmap.text section. Only three functions reside
there at the moment: __enable_mmu(), cpu_resume_mmu() and cpu_reset().
If new code is introduced that needs to manipulate the MMU state, it
should be added to this section as well.
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/kernel/head.S | 13 +++++++------
arch/arm64/kernel/sleep.S | 2 ++
arch/arm64/kernel/vmlinux.lds.S | 11 ++++++++++-
arch/arm64/mm/proc.S | 3 ++-
4 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 30cffc5e7402..c0ff3ce4299e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -361,7 +361,7 @@ __create_page_tables:
* Create the identity mapping.
*/
mov x0, x25 // idmap_pg_dir
- adrp x3, KERNEL_START // __pa(KERNEL_START)
+ adrp x3, __idmap_text_start // __pa(__idmap_text_start)
#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -384,11 +384,11 @@ __create_page_tables:
/*
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
- * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in
- * the physical address of KERNEL_END.
+ * the physical address of __idmap_text_end.
*/
- adrp x5, KERNEL_END
+ adrp x5, __idmap_text_end
clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
b.ge 1f // .. then skip additional level
@@ -403,8 +403,8 @@ __create_page_tables:
#endif
create_pgd_entry x0, x3, x5, x6
- mov x5, x3 // __pa(KERNEL_START)
- adr_l x6, KERNEL_END // __pa(KERNEL_END)
+ mov x5, x3 // __pa(__idmap_text_start)
+ adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
create_block_map x0, x7, x3, x5, x6
/*
@@ -632,6 +632,7 @@ ENDPROC(__secondary_switched)
*
* other registers depend on the function called upon completion
*/
+ .section ".idmap.text", "ax"
__enable_mmu:
ldr x5, =vectors
msr vbar_el1, x5
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index ede186cdd452..811e61a2d847 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
/*
* x0 must contain the sctlr value retrieved from restored context
*/
+ .pushsection ".idmap.text", "ax"
ENTRY(cpu_resume_mmu)
ldr x3, =cpu_resume_after_mmu
msr sctlr_el1, x0 // restore sctlr_el1
isb
br x3 // global jump to virtual address
ENDPROC(cpu_resume_mmu)
+ .popsection
cpu_resume_after_mmu:
mov x0, #0 // return zero on success
ldp x19, x20, [sp, #16]
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a2c29865c3fe..98073332e2d0 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -38,6 +38,12 @@ jiffies = jiffies_64;
*(.hyp.text) \
VMLINUX_SYMBOL(__hyp_text_end) = .;
+#define IDMAP_TEXT \
+ . = ALIGN(SZ_4K); \
+ VMLINUX_SYMBOL(__idmap_text_start) = .; \
+ *(.idmap.text) \
+ VMLINUX_SYMBOL(__idmap_text_end) = .;
+
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -95,6 +101,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
HYPERVISOR_TEXT
+ IDMAP_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -167,11 +174,13 @@ SECTIONS
}
/*
- * The HYP init code can't be more than a page long,
+ * The HYP init code and ID map text can't be longer than a page each,
* and should not cross a page boundary.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+ "ID map text too big or misaligned")
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cdd754e19b9b..a265934ab0af 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -67,7 +67,7 @@ ENDPROC(cpu_cache_off)
*
* - loc - location to jump to for soft reset
*/
- .align 5
+ .pushsection ".idmap.text", "ax"
ENTRY(cpu_reset)
mrs x1, sctlr_el1
bic x1, x1, #1
@@ -75,6 +75,7 @@ ENTRY(cpu_reset)
isb
ret x0
ENDPROC(cpu_reset)
+ .popsection
ENTRY(cpu_soft_restart)
/* Save address of cpu_reset() and reset address */
--
1.9.1
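To make the T0SZ reasoning above concrete, a small standalone sketch (plain C
for illustration, not kernel code; assumes a non-zero address since clz of 0
is undefined):

#include <stdint.h>

/* T0SZ == 64 - (number of address bits used), which is the count of
 * leading zeroes of the highest physical address the ID map must cover. */
static inline unsigned int idmap_t0sz_for(uint64_t pa_end)
{
        return (unsigned int)__builtin_clzll(pa_end);
}

For example, an __idmap_text_end at physical address 0x40000000 has 33 leading
zeroes, comfortably above the default T0SZ of 25 for a 39-bit VA
configuration, so the extra translation level is skipped in that case.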
* [PATCH 03/10] arm64: drop sleep_idmap_phys and clean up cpu_resume()
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
2015-05-11 7:12 ` [PATCH 01/10] arm64/efi: use dynamically allocated space for EFI pgd Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 02/10] arm64: reduce ID map to a single page Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 04/10] arm64: use more granular reservations for static page table allocations Ard Biesheuvel
` (7 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
Two cleanups of the asm function cpu_resume():
- The global variable sleep_idmap_phys always points to idmap_pg_dir,
so we can just use that value directly in the CPU resume path.
- Unclutter the load of sleep_save_sp::save_ptr_stash_phys.
Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Tested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/kernel/sleep.S | 7 ++-----
arch/arm64/kernel/suspend.c | 3 ---
2 files changed, 2 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 811e61a2d847..803cfea41962 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -164,15 +164,12 @@ ENTRY(cpu_resume)
#else
mov x7, xzr
#endif
- adrp x0, sleep_save_sp
- add x0, x0, #:lo12:sleep_save_sp
- ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
+ ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
ldr x0, [x0, x7, lsl #3]
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
- adrp x1, sleep_idmap_phys
/* load physical address of identity map page table in x1 */
- ldr x1, [x1, #:lo12:sleep_idmap_phys]
+ adrp x1, idmap_pg_dir
mov sp, x2
/*
* cpu_do_resume expects x0 to contain context physical address
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7daf45ae7a2..f6073c27d65f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -118,7 +118,6 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
}
struct sleep_save_sp sleep_save_sp;
-phys_addr_t sleep_idmap_phys;
static int __init cpu_suspend_init(void)
{
@@ -132,9 +131,7 @@ static int __init cpu_suspend_init(void)
sleep_save_sp.save_ptr_stash = ctx_ptr;
sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
- sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
- __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
return 0;
}
--
1.9.1
* [PATCH 04/10] arm64: use more granular reservations for static page table allocations
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (2 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 03/10] arm64: drop sleep_idmap_phys and clean up cpu_resume() Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 05/10] arm64: split off early mapping code from early_fixmap_init() Ard Biesheuvel
` (6 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
Before introducing new statically allocated page tables and increasing
their alignment in subsequent patches, update the reservation logic
so that only pages that are in actual use end up as reserved with
memblock.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/mm/init.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index a3469a436a73..373ea02bdb96 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -37,6 +37,7 @@
#include <asm/fixmap.h>
#include <asm/memory.h>
+#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -173,11 +174,13 @@ void __init arm64_memblock_init(void)
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
- memblock_reserve(__pa(_text), _end - _text);
+ memblock_reserve(__pa(_text), __bss_stop - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
+ memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
+ memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
if (!efi_enabled(EFI_MEMMAP))
early_init_fdt_scan_reserved_mem();
--
1.9.1
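As a rough standalone illustration of what the more granular reservation buys
(plain C, illustrative only; symbol values are passed in as plain numbers),
compare the old blanket reservation with the new one:

#include <stddef.h>

/* Old scheme: reserve everything from _text to _end, which would include
 * any alignment padding and unused pages of the page-table region once
 * later patches enlarge and over-align it. */
size_t reserved_old(size_t text, size_t end)
{
        return end - text;
}

/* New scheme: reserve _text..__bss_stop plus only the page-table
 * directories that are known to be in use. */
size_t reserved_new(size_t text, size_t bss_stop,
                    size_t idmap_dir_size, size_t swapper_dir_size)
{
        return (bss_stop - text) + idmap_dir_size + swapper_dir_size;
}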
* [PATCH 05/10] arm64: split off early mapping code from early_fixmap_init()
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (3 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 04/10] arm64: use more granular reservations for static page table allocations Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 06/10] arm64: mm: explicitly bootstrap the linear mapping Ard Biesheuvel
` (5 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
This splits off and generalises the population of the statically
allocated fixmap page tables so that we may reuse it later for
the linear mapping once we move the kernel text mapping out of it.
This also involves taking into account that table entries at any of
the levels we are populating may have been populated already, since
the fixmap mapping might no longer be disjoint from other early mappings
up to the pgd level.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/include/asm/compiler.h | 2 ++
arch/arm64/kernel/vmlinux.lds.S | 12 ++++----
arch/arm64/mm/mmu.c | 60 +++++++++++++++++++++++++++------------
3 files changed, 51 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index ee35fd0f2236..dd342af63673 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -27,4 +27,6 @@
*/
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#define __pgdir __attribute__((section(".pgdir"),aligned(PAGE_SIZE)))
+
#endif /* __ASM_COMPILER_H */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 98073332e2d0..ceec4def354b 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -160,11 +160,13 @@ SECTIONS
BSS_SECTION(0, 0, 0)
- . = ALIGN(PAGE_SIZE);
- idmap_pg_dir = .;
- . += IDMAP_DIR_SIZE;
- swapper_pg_dir = .;
- . += SWAPPER_DIR_SIZE;
+ .pgdir (NOLOAD) : ALIGN(PAGE_SIZE) {
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+ swapper_pg_dir = .;
+ . += SWAPPER_DIR_SIZE;
+ *(.pgdir)
+ }
_end = .;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 82d3435bf14f..f64a817af469 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -342,6 +342,44 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
}
#endif
+struct bootstrap_pgtables {
+ pte_t pte[PTRS_PER_PTE];
+ pmd_t pmd[PTRS_PER_PMD > 1 ? PTRS_PER_PMD : 0];
+ pud_t pud[PTRS_PER_PUD > 1 ? PTRS_PER_PUD : 0];
+};
+
+static void __init bootstrap_early_mapping(unsigned long addr,
+ struct bootstrap_pgtables *reg,
+ bool pte_level)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd)) {
+ clear_page(reg->pud);
+ memblock_reserve(__pa(reg->pud), PAGE_SIZE);
+ pgd_populate(&init_mm, pgd, reg->pud);
+ }
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ clear_page(reg->pmd);
+ memblock_reserve(__pa(reg->pmd), PAGE_SIZE);
+ pud_populate(&init_mm, pud, reg->pmd);
+ }
+
+ if (!pte_level)
+ return;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+ clear_page(reg->pte);
+ memblock_reserve(__pa(reg->pte), PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, reg->pte);
+ }
+}
+
static void __init map_mem(void)
{
struct memblock_region *reg;
@@ -555,14 +593,6 @@ void vmemmap_free(unsigned long start, unsigned long end)
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
static inline pud_t * fixmap_pud(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
@@ -592,21 +622,15 @@ static inline pte_t * fixmap_pte(unsigned long addr)
void __init early_fixmap_init(void)
{
- pgd_t *pgd;
- pud_t *pud;
+ static struct bootstrap_pgtables fixmap_bs_pgtables __pgdir;
pmd_t *pmd;
- unsigned long addr = FIXADDR_START;
- pgd = pgd_offset_k(addr);
- pgd_populate(&init_mm, pgd, bm_pud);
- pud = pud_offset(pgd, addr);
- pud_populate(&init_mm, pud, bm_pmd);
- pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
+ bootstrap_early_mapping(FIXADDR_START, &fixmap_bs_pgtables, true);
+ pmd = fixmap_pmd(FIXADDR_START);
/*
* The boot-ioremap range spans multiple pmds, for which
- * we are not preparted:
+ * we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
--
1.9.1
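A usage sketch of the new helper (hypothetical caller and second region, not
part of the patch): because each level is only populated when it is still
empty, two early mappings that share a pgd or pud entry can now be
bootstrapped independently.

static void __init example_bootstrap(void)          /* hypothetical */
{
        static struct bootstrap_pgtables fixmap_bs __pgdir;
        static struct bootstrap_pgtables extra_bs __pgdir;

        /* populate pud/pmd/pte levels for the fixmap, as before */
        bootstrap_early_mapping(FIXADDR_START, &fixmap_bs, true);

        /* a second region sharing upper-level entries reuses whatever is
         * already populated; pte_level == false stops at the pmd, e.g.
         * when block mappings will be used (EXAMPLE_EARLY_VA is made up) */
        bootstrap_early_mapping(EXAMPLE_EARLY_VA, &extra_bs, false);
}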
* [PATCH 06/10] arm64: mm: explicitly bootstrap the linear mapping
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (4 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 05/10] arm64: split off early mapping code from early_fixmap_init() Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 07/10] arm64: move kernel mapping out of linear region Ard Biesheuvel
` (4 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
In preparation of moving the kernel text out of the linear
mapping, ensure that the part of the kernel Image that contains
the statically allocated page tables is made accessible via the
linear mapping before performing the actual mapping of all of
memory. This is needed by the normal mapping routines, which rely
on the linear mapping to walk the page tables while manipulating
them.
In addition, explicitly map the start of DRAM and set the memblock
limit so that all early memblock allocations are done from a region
that is guaranteed to be mapped.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/kernel/vmlinux.lds.S | 19 ++++++-
arch/arm64/mm/mmu.c | 109 +++++++++++++++++++++++++++++-----------
2 files changed, 98 insertions(+), 30 deletions(-)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ceec4def354b..0b82c4c203fb 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -68,6 +68,18 @@ PECOFF_FILE_ALIGNMENT = 0x200;
#define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min);
#endif
+/*
+ * The pgdir region needs to be mappable using a single PMD or PUD sized region,
+ * so it should not cross a 512 MB or 1 GB alignment boundary, respectively
+ * (depending on page size). So align to a power-of-2 upper bound of the size
+ * of the entire __pgdir section.
+ */
+#if CONFIG_ARM64_PGTABLE_LEVELS == 2
+#define PGDIR_ALIGN (8 * PAGE_SIZE)
+#else
+#define PGDIR_ALIGN (16 * PAGE_SIZE)
+#endif
+
SECTIONS
{
/*
@@ -160,7 +172,7 @@ SECTIONS
BSS_SECTION(0, 0, 0)
- .pgdir (NOLOAD) : ALIGN(PAGE_SIZE) {
+ .pgdir (NOLOAD) : ALIGN(PGDIR_ALIGN) {
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
swapper_pg_dir = .;
@@ -185,6 +197,11 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"ID map text too big or misaligned")
/*
+ * Check that the chosen PGDIR_ALIGN value is sufficient.
+ */
+ASSERT(SIZEOF(.pgdir) <= ALIGNOF(.pgdir), ".pgdir size exceeds its alignment")
+
+/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f64a817af469..32ce481d90bf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -380,26 +380,92 @@ static void __init bootstrap_early_mapping(unsigned long addr,
}
}
-static void __init map_mem(void)
+/*
+ * Bootstrap a memory mapping in such a way that it does not require allocation
+ * of page tables beyond the ones that were allocated statically by
+ * bootstrap_early_mapping().
+ * This is done by finding the memblock that covers pa_base, and intersecting
+ * it with the naturally aligned 512 MB or 1 GB region (depending on page size)
+ * that covers pa_base as well, and (on 4k pages) rounding it to section size.
+ */
+static unsigned long __init bootstrap_region(struct bootstrap_pgtables *reg,
+ phys_addr_t pa_base,
+ unsigned long va_offset)
{
- struct memblock_region *reg;
- phys_addr_t limit;
+ unsigned long va_base = __phys_to_virt(pa_base) + va_offset;
+ struct memblock_region *mr;
+
+ bootstrap_early_mapping(va_base, reg,
+ IS_ENABLED(CONFIG_ARM64_64K_PAGES));
+
+ for_each_memblock(memory, mr) {
+ phys_addr_t start = mr->base;
+ phys_addr_t end = start + mr->size;
+ unsigned long vstart, vend;
+
+ if (start > pa_base || end <= pa_base)
+ continue;
+
+#ifdef CONFIG_ARM64_64K_PAGES
+ /* clip the region to PMD size */
+ vstart = max(va_base & PMD_MASK,
+ round_up(__phys_to_virt(start) + va_offset,
+ PAGE_SIZE));
+ vend = min(round_up(va_base + 1, PMD_SIZE),
+ round_down(__phys_to_virt(end) + va_offset,
+ PAGE_SIZE));
+#else
+ /* clip the region to PUD size */
+ vstart = max(va_base & PUD_MASK,
+ round_up(__phys_to_virt(start) + va_offset,
+ PMD_SIZE));
+ vend = min(round_up(va_base + 1, PUD_SIZE),
+ round_down(__phys_to_virt(end) + va_offset,
+ PMD_SIZE));
+#endif
+
+ create_mapping(__pa(vstart - va_offset), vstart, vend - vstart,
+ PAGE_KERNEL_EXEC);
+
+ return vend;
+ }
+ return 0;
+}
+
+/*
+ * Bootstrap the linear ranges that cover the start of DRAM and swapper_pg_dir
+ * so that the statically allocated page tables as well as newly allocated ones
+ * are accessible via the linear mapping.
+ */
+static void __init bootstrap_linear_mapping(unsigned long va_offset)
+{
+ static struct bootstrap_pgtables __pgdir bs_pgdir_low, bs_pgdir_high;
+ unsigned long vend;
+
+ /* Bootstrap the mapping for the beginning of RAM */
+ vend = bootstrap_region(&bs_pgdir_low, memblock_start_of_DRAM(),
+ va_offset);
+ BUG_ON(vend == 0);
/*
* Temporarily limit the memblock range. We need to do this as
* create_mapping requires puds, pmds and ptes to be allocated from
- * memory addressable from the initial direct kernel mapping.
- *
- * The initial direct kernel mapping, located at swapper_pg_dir, gives
- * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
- * PHYS_OFFSET (which must be aligned to 2MB as per
- * Documentation/arm64/booting.txt).
+ * memory addressable from the early linear mapping.
*/
- if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
- limit = PHYS_OFFSET + PMD_SIZE;
- else
- limit = PHYS_OFFSET + PUD_SIZE;
- memblock_set_current_limit(limit);
+ memblock_set_current_limit(__pa(vend - va_offset));
+
+ /* Bootstrap the linear mapping of the kernel image */
+ vend = bootstrap_region(&bs_pgdir_high, __pa(swapper_pg_dir),
+ va_offset);
+ if (vend == 0)
+ panic("Kernel image not covered by memblock");
+}
+
+static void __init map_mem(void)
+{
+ struct memblock_region *reg;
+
+ bootstrap_linear_mapping(0);
/* map all the memory banks */
for_each_memblock(memory, reg) {
@@ -409,21 +475,6 @@ static void __init map_mem(void)
if (start >= end)
break;
-#ifndef CONFIG_ARM64_64K_PAGES
- /*
- * For the first memory bank align the start address and
- * current memblock limit to prevent create_mapping() from
- * allocating pte page tables from unmapped memory.
- * When 64K pages are enabled, the pte page table for the
- * first PGDIR_SIZE is already present in swapper_pg_dir.
- */
- if (start < limit)
- start = ALIGN(start, PMD_SIZE);
- if (end < limit) {
- limit = end & PMD_MASK;
- memblock_set_current_limit(limit);
- }
-#endif
__map_memblock(start, end);
}
--
1.9.1
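The clipping done by bootstrap_region() for the 4k-pages case can be written
out as a standalone sketch (plain C, illustrative only): the mapped range is
the intersection of the memblock with the naturally aligned 1 GB (PUD-sized)
region covering va_base, rounded inwards to 2 MB sections.

#include <stdint.h>

#define SZ_2M 0x200000ULL       /* PMD_SIZE with 4k pages */
#define SZ_1G 0x40000000ULL     /* PUD_SIZE with 4k pages */

static uint64_t rnd_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t rnd_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

/* Clip [va_start, va_end) to the 1 GB naturally aligned region containing
 * va_base, rounding inwards to 2 MB section boundaries. */
void clip_to_bootstrap_region(uint64_t va_base, uint64_t va_start,
                              uint64_t va_end, uint64_t *vs, uint64_t *ve)
{
        uint64_t lo = rnd_down(va_base, SZ_1G);
        uint64_t hi = rnd_up(va_base + 1, SZ_1G);
        uint64_t s  = rnd_up(va_start, SZ_2M);
        uint64_t e  = rnd_down(va_end, SZ_2M);

        *vs = lo > s ? lo : s;
        *ve = hi < e ? hi : e;
}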
* [PATCH 07/10] arm64: move kernel mapping out of linear region
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (5 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 06/10] arm64: mm: explicitly bootstrap the linear mapping Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 08/10] arm64: map linear region as non-executable Ard Biesheuvel
` (3 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
This moves the primary mapping of the kernel Image out of
the linear region. This is a preparatory step towards allowing
the kernel Image to reside anywhere in physical memory without
affecting the ability to map all of it efficiently.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/include/asm/boot.h | 7 +++++++
arch/arm64/include/asm/memory.h | 19 ++++++++++++++++---
arch/arm64/kernel/head.S | 18 +++++++++++++-----
arch/arm64/kernel/vmlinux.lds.S | 11 +++++++++--
arch/arm64/mm/init.c | 5 +++--
arch/arm64/mm/mmu.c | 10 +++++++++-
6 files changed, 57 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index 81151b67b26b..092d1096ce9a 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -11,4 +11,11 @@
#define MIN_FDT_ALIGN 8
#define MAX_FDT_SIZE SZ_2M
+/*
+ * arm64 requires the kernel image to be 2 MB aligned and
+ * not exceed 64 MB in size.
+ */
+#define MIN_KIMG_ALIGN SZ_2M
+#define MAX_KIMG_SIZE SZ_64M
+
#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f800d45ea226..050ede7cd1ee 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -24,6 +24,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
+#include <asm/boot.h>
#include <asm/sizes.h>
/*
@@ -39,7 +40,12 @@
#define PCI_IO_SIZE SZ_16M
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * Offset below PAGE_OFFSET where to map the kernel Image.
+ */
+#define KIMAGE_OFFSET MAX_KIMG_SIZE
+
+/*
+ * PAGE_OFFSET - the virtual address of the base of the linear mapping (top
* (VA_BITS - 1))
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
@@ -49,7 +55,8 @@
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define MODULES_END (PAGE_OFFSET)
+#define KIMAGE_VADDR (PAGE_OFFSET - KIMAGE_OFFSET)
+#define MODULES_END KIMAGE_VADDR
#define MODULES_VADDR (MODULES_END - SZ_64M)
#define PCI_IO_END (MODULES_VADDR - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
@@ -77,7 +84,11 @@
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
-#define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
+#define __virt_to_phys(x) ({ \
+ long __x = (long)(x) - PAGE_OFFSET; \
+ __x >= 0 ? (phys_addr_t)(__x + PHYS_OFFSET) : \
+ (phys_addr_t)(__x + PHYS_OFFSET + kernel_va_offset); })
+
#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
/*
@@ -113,6 +124,8 @@ extern phys_addr_t memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ memstart_addr; })
+extern u64 kernel_va_offset;
+
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c0ff3ce4299e..3050a4066db9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -36,8 +36,6 @@
#include <asm/page.h>
#include <asm/virt.h>
-#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
-
#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
@@ -58,6 +56,8 @@
#define KERNEL_START _text
#define KERNEL_END _end
+#define KERNEL_BASE (KERNEL_START - TEXT_OFFSET)
+
/*
* Initial memory map attributes.
@@ -235,7 +235,15 @@ section_table:
ENTRY(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- adrp x24, __PHYS_OFFSET
+
+ /*
+ * Before the linear mapping has been set up, __va() translations will
+ * not produce usable virtual addresses unless we tweak PHYS_OFFSET to
+ * compensate for the offset between the kernel mapping and the base of
+ * the linear mapping. We will undo this in map_mem().
+ */
+ adrp x24, KERNEL_BASE + KIMAGE_OFFSET
+
bl set_cpu_boot_mode_flag
bl __create_page_tables // x25=TTBR0, x26=TTBR1
/*
@@ -411,10 +419,10 @@ __create_page_tables:
* Map the kernel image (starting with PHYS_OFFSET).
*/
mov x0, x26 // swapper_pg_dir
- mov x5, #PAGE_OFFSET
+ ldr x5, =KERNEL_BASE
create_pgd_entry x0, x5, x3, x6
ldr x6, =KERNEL_END // __va(KERNEL_END)
- mov x3, x24 // phys offset
+ adrp x3, KERNEL_BASE // real PHYS_OFFSET
create_block_map x0, x7, x3, x5, x6
/*
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 0b82c4c203fb..1f6d79eeda06 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
+#include <asm/boot.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -96,7 +97,7 @@ SECTIONS
*(.discard.*)
}
- . = PAGE_OFFSET + TEXT_OFFSET;
+ . = KIMAGE_VADDR + TEXT_OFFSET;
.head.text : {
_text = .;
@@ -204,4 +205,10 @@ ASSERT(SIZEOF(.pgdir) <= ALIGNOF(.pgdir), ".pgdir size exceeds its alignment")
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
-ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
+
+/*
+ * Make sure the memory footprint of the kernel Image does not exceed the limit.
+ */
+ASSERT(_end - _text + TEXT_OFFSET <= MAX_KIMG_SIZE,
+ "Kernel Image memory footprint exceeds MAX_KIMG_SIZE")
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 373ea02bdb96..3909a5fe7d7c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -164,9 +164,10 @@ void __init arm64_memblock_init(void)
* with the linear mapping.
*/
const s64 linear_region_size = -(s64)PAGE_OFFSET;
+ u64 dram_base = memstart_addr - KIMAGE_OFFSET;
- memblock_remove(0, memstart_addr);
- memblock_remove(memstart_addr + linear_region_size, ULLONG_MAX);
+ memblock_remove(0, dram_base);
+ memblock_remove(dram_base + linear_region_size, ULLONG_MAX);
memblock_enforce_memory_limit(memory_limit);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 32ce481d90bf..c5c8d0f886e9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -50,6 +50,8 @@ u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
+u64 kernel_va_offset __read_mostly;
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -436,6 +438,9 @@ static unsigned long __init bootstrap_region(struct bootstrap_pgtables *reg,
* Bootstrap the linear ranges that cover the start of DRAM and swapper_pg_dir
* so that the statically allocated page tables as well as newly allocated ones
* are accessible via the linear mapping.
+ * Since at this point, PHYS_OFFSET is still biased to redirect __va()
+ * translations into the kernel text mapping, we need to apply an
+ * explicit va_offset to calculate linear virtual addresses.
*/
static void __init bootstrap_linear_mapping(unsigned long va_offset)
{
@@ -465,7 +470,10 @@ static void __init map_mem(void)
{
struct memblock_region *reg;
- bootstrap_linear_mapping(0);
+ bootstrap_linear_mapping(KIMAGE_OFFSET);
+
+ kernel_va_offset = KIMAGE_OFFSET;
+ memstart_addr -= KIMAGE_OFFSET;
/* map all the memory banks */
for_each_memblock(memory, reg) {
--
1.9.1
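The biased __virt_to_phys() introduced above can be pictured with a standalone
sketch (plain C, illustrative only; all offsets are explicit parameters rather
than globals): addresses at or above PAGE_OFFSET translate through the linear
map as before, while addresses below it, i.e. in the kernel image mapping,
need the extra kernel_va_offset.

#include <stdint.h>

uint64_t virt_to_phys_sketch(uint64_t va, uint64_t page_offset,
                             uint64_t phys_offset, uint64_t kernel_va_offset)
{
        int64_t off = (int64_t)(va - page_offset);   /* negative for kimage VAs */

        return off >= 0 ? (uint64_t)off + phys_offset
                        : (uint64_t)off + phys_offset + kernel_va_offset;
}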
* [PATCH 08/10] arm64: map linear region as non-executable
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (6 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 07/10] arm64: move kernel mapping out of linear region Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 09/10] arm64: allow kernel Image to be loaded anywhere in physical memory Ard Biesheuvel
` (2 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
Now that we moved the kernel text out of the linear region, there
is no longer a reason to map the linear region as executable. This
also allows us to completely get rid of the __map_mem() variant that
only maps some of it executable if CONFIG_DEBUG_RODATA is selected.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/mm/mmu.c | 41 ++---------------------------------------
1 file changed, 2 insertions(+), 39 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c5c8d0f886e9..9c94c8c78da7 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -302,47 +302,10 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
phys, virt, size, prot, late_alloc);
}
-#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
- /*
- * Set up the executable regions using the existing section mappings
- * for now. This will get more fine grained later once all memory
- * is mapped
- */
- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
-
- if (end < kernel_x_start) {
- create_mapping(start, __phys_to_virt(start),
- end - start, PAGE_KERNEL);
- } else if (start >= kernel_x_end) {
- create_mapping(start, __phys_to_virt(start),
- end - start, PAGE_KERNEL);
- } else {
- if (start < kernel_x_start)
- create_mapping(start, __phys_to_virt(start),
- kernel_x_start - start,
- PAGE_KERNEL);
- create_mapping(kernel_x_start,
- __phys_to_virt(kernel_x_start),
- kernel_x_end - kernel_x_start,
- PAGE_KERNEL_EXEC);
- if (kernel_x_end < end)
- create_mapping(kernel_x_end,
- __phys_to_virt(kernel_x_end),
- end - kernel_x_end,
- PAGE_KERNEL);
- }
-
-}
-#else
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
-{
- create_mapping(start, __phys_to_virt(start), end - start,
- PAGE_KERNEL_EXEC);
+ create_mapping(start, __phys_to_virt(start), end - start, PAGE_KERNEL);
}
-#endif
struct bootstrap_pgtables {
pte_t pte[PTRS_PER_PTE];
@@ -427,7 +390,7 @@ static unsigned long __init bootstrap_region(struct bootstrap_pgtables *reg,
#endif
create_mapping(__pa(vstart - va_offset), vstart, vend - vstart,
- PAGE_KERNEL_EXEC);
+ PAGE_KERNEL);
return vend;
}
--
1.9.1
* [PATCH 09/10] arm64: allow kernel Image to be loaded anywhere in physical memory
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (7 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 08/10] arm64: map linear region as non-executable Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-11 7:13 ` [PATCH 10/10] arm64/efi: adapt to relaxed kernel Image placement requirements Ard Biesheuvel
2015-05-22 5:43 ` [PATCH] fixup! arm64: allow kernel Image to be loaded anywhere in physical memory AKASHI Takahiro
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
This relaxes the kernel Image placement requirements, so that it
may be placed at any 2 MB aligned offset in physical memory.
This is accomplished by ignoring PHYS_OFFSET when installing
memblocks, and accounting for the apparent virtual offset of
the kernel Image (in addition to the 64 MB that it is moved
below PAGE_OFFSET). As a result, virtual address references
below PAGE_OFFSET are correctly mapped onto physical references
into the kernel Image regardless of where it sits in memory.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
Documentation/arm64/booting.txt | 20 +++++++++---------
arch/arm64/mm/init.c | 47 +++++++++++++++++++++++++++++++++++++----
arch/arm64/mm/mmu.c | 22 ++++++++++++++++---
3 files changed, 72 insertions(+), 17 deletions(-)
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 53f18e13d51c..7bd9feedb6f9 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -113,16 +113,16 @@ Header notes:
depending on selected features, and is effectively unbound.
The Image must be placed text_offset bytes from a 2MB aligned base
-address near the start of usable system RAM and called there. Memory
-below that base address is currently unusable by Linux, and therefore it
-is strongly recommended that this location is the start of system RAM.
-At least image_size bytes from the start of the image must be free for
-use by the kernel.
-
-Any memory described to the kernel (even that below the 2MB aligned base
-address) which is not marked as reserved from the kernel e.g. with a
-memreserve region in the device tree) will be considered as available to
-the kernel.
+address anywhere in usable system RAM and called there. At least
+image_size bytes from the start of the image must be free for use
+by the kernel.
+NOTE: versions prior to v4.2 cannot make use of memory below the
+physical offset of the Image so it is recommended that the Image be
+placed as close as possible to the start of system RAM.
+
+Any memory described to the kernel which is not marked as reserved from
+the kernel (e.g., with a memreserve region in the device tree) will be
+considered as available to the kernel.
Before jumping into the kernel, the following conditions must be met:
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3909a5fe7d7c..4ee01ebc4029 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,6 +35,7 @@
#include <linux/efi.h>
#include <linux/swiotlb.h>
+#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
@@ -157,6 +158,45 @@ static int __init early_mem(char *p)
}
early_param("mem", early_mem);
+static void enforce_memory_limit(void)
+{
+ const phys_addr_t kbase = round_down(__pa(_text), MIN_KIMG_ALIGN);
+ u64 to_remove = memblock_phys_mem_size() - memory_limit;
+ phys_addr_t max_addr = 0;
+ struct memblock_region *r;
+
+ if (memory_limit == (phys_addr_t)ULLONG_MAX)
+ return;
+
+ /*
+ * The kernel may be high up in physical memory, so try to apply the
+ * limit below the kernel first, and only let the generic handling
+ * take over if it turns out we haven't clipped enough memory yet.
+ */
+ for_each_memblock(memory, r) {
+ if (r->base + r->size > kbase) {
+ u64 rem = min(to_remove, kbase - r->base);
+
+ max_addr = r->base + rem;
+ to_remove -= rem;
+ break;
+ }
+ if (to_remove <= r->size) {
+ max_addr = r->base + to_remove;
+ to_remove = 0;
+ break;
+ }
+ to_remove -= r->size;
+ }
+
+ /* truncate both memory and reserved regions */
+ memblock_remove_range(&memblock.memory, 0, max_addr);
+ memblock_remove_range(&memblock.reserved, 0, max_addr);
+
+ if (to_remove)
+ memblock_enforce_memory_limit(memory_limit);
+}
+
void __init arm64_memblock_init(void)
{
/*
@@ -164,12 +204,11 @@ void __init arm64_memblock_init(void)
* with the linear mapping.
*/
const s64 linear_region_size = -(s64)PAGE_OFFSET;
- u64 dram_base = memstart_addr - KIMAGE_OFFSET;
- memblock_remove(0, dram_base);
- memblock_remove(dram_base + linear_region_size, ULLONG_MAX);
+ memblock_remove(round_down(memblock_start_of_DRAM(), SZ_1G) +
+ linear_region_size, ULLONG_MAX);
- memblock_enforce_memory_limit(memory_limit);
+ enforce_memory_limit();
/*
* Register the kernel text, kernel data, initrd, and initial
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9c94c8c78da7..7e3e6af9b55c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -432,11 +432,27 @@ static void __init bootstrap_linear_mapping(unsigned long va_offset)
static void __init map_mem(void)
{
struct memblock_region *reg;
+ u64 new_memstart_addr;
+ u64 new_va_offset;
- bootstrap_linear_mapping(KIMAGE_OFFSET);
+ /*
+ * Select a suitable value for the base of physical memory.
+ * This should be equal to or below the lowest usable physical
+ * memory address, and aligned to PUD/PMD size so that we can map
+ * it efficiently.
+ */
+ new_memstart_addr = round_down(memblock_start_of_DRAM(), SZ_1G);
+
+ /*
+ * Calculate the offset between the kernel text mapping that exists
+ * outside of the linear mapping, and its mapping in the linear region.
+ */
+ new_va_offset = memstart_addr - new_memstart_addr;
+
+ bootstrap_linear_mapping(new_va_offset);
- kernel_va_offset = KIMAGE_OFFSET;
- memstart_addr -= KIMAGE_OFFSET;
+ kernel_va_offset = new_va_offset;
+ memstart_addr = new_memstart_addr;
/* map all the memory banks */
for_each_memblock(memory, reg) {
--
1.9.1
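A worked example of the clipping strategy in enforce_memory_limit()
(standalone C with made-up addresses): when the Image sits in a high memblock
and mem=1G is passed, the low region is clipped first and the region holding
the kernel survives intact.

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

int main(void)
{
        /* hypothetical layout: 1 GB of low RAM, 1 GB of high RAM with the
         * kernel Image loaded somewhere above 0x880000000 */
        struct region mem[2] = { { 0x080000000ULL, 0x40000000ULL },
                                 { 0x880000000ULL, 0x40000000ULL } };
        uint64_t kbase = 0x880200000ULL;        /* round_down(__pa(_text), 2M) */
        uint64_t limit = 0x40000000ULL;         /* mem=1G */
        uint64_t to_remove = 0x80000000ULL - limit;
        uint64_t max_addr = 0;

        for (int i = 0; i < 2; i++) {
                if (mem[i].base + mem[i].size > kbase) {
                        uint64_t room = kbase - mem[i].base;
                        uint64_t rem = to_remove < room ? to_remove : room;

                        max_addr = mem[i].base + rem;
                        to_remove -= rem;
                        break;
                }
                if (to_remove <= mem[i].size) {
                        max_addr = mem[i].base + to_remove;
                        to_remove = 0;
                        break;
                }
                to_remove -= mem[i].size;
        }

        /* here max_addr ends up at 0xc0000000: the whole low region is
         * removed and the kernel's region is left untouched */
        printf("remove [0, %#llx), %#llx left for the generic fallback\n",
               (unsigned long long)max_addr, (unsigned long long)to_remove);
        return 0;
}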
* [PATCH 10/10] arm64/efi: adapt to relaxed kernel Image placement requirements
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (8 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 09/10] arm64: allow kernel Image to be loaded anywhere in physical memory Ard Biesheuvel
@ 2015-05-11 7:13 ` Ard Biesheuvel
2015-05-22 5:43 ` [PATCH] fixup! arm64: allow kernel Image to be loaded anywhere in physical memory AKASHI Takahiro
10 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-11 7:13 UTC
To: linux-arm-kernel
This adapts the EFI stub kernel placement to the new relaxed
requirements, by placing the kernel Image at the highest available
2 MB aligned offset in physical memory.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/kernel/efi-stub.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index f5374065ad53..60ae3324e26e 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -10,6 +10,7 @@
*
*/
#include <linux/efi.h>
+#include <asm/boot.h>
#include <asm/efi.h>
#include <asm/sections.h>
@@ -28,8 +29,8 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) {
kernel_memsize = kernel_size + (_end - _edata);
- status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
- SZ_2M, reserve_addr);
+ status = efi_high_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
+ MIN_KIMG_ALIGN, reserve_addr, ULONG_MAX);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to relocate kernel\n");
return status;
--
1.9.1
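For context, a hedged sketch of what the surrounding (unchanged) stub code is
assumed to do with the result; this is not shown in the hunk above and the
exact code may differ: on success the Image is copied into the new reservation
and image_addr is updated to point text_offset bytes into it.

/* Assumed surrounding logic -- sketch only, not part of the patch. */
if (status == EFI_SUCCESS) {
        memcpy((void *)(*reserve_addr + TEXT_OFFSET),
               (void *)*image_addr, kernel_size);
        *image_addr = *reserve_addr + TEXT_OFFSET;
}

The only functional change in the hunk itself is the allocation policy:
efi_high_alloc() with MIN_KIMG_ALIGN picks the highest suitable 2 MB aligned
region instead of the lowest, exercising the relaxed placement rules.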
* [PATCH] fixup! arm64: allow kernel Image to be loaded anywhere in physical memory
2015-05-11 7:12 [PATCH 00/10] arm64: relax Image placement rules Ard Biesheuvel
` (9 preceding siblings ...)
2015-05-11 7:13 ` [PATCH 10/10] arm64/efi: adapt to relaxed kernel Image placement requirements Ard Biesheuvel
@ 2015-05-22 5:43 ` AKASHI Takahiro
2015-05-22 6:34 ` Ard Biesheuvel
10 siblings, 1 reply; 13+ messages in thread
From: AKASHI Takahiro @ 2015-05-22 5:43 UTC
To: linux-arm-kernel
Ard,
While testing my kexec/dump code, I found that your patch doesn't work
with an initrd. Since the virtual addresses of the initrd are calculated
earlier than map_mem(), they hold bogus values and unpack_to_rootfs() will fail.
This patch fixes the issue.
Please consider folding it into your next version.
---
arch/arm64/mm/mmu.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7e3e6af..43c2317 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/initrd.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
@@ -452,6 +453,13 @@ static void __init map_mem(void)
bootstrap_linear_mapping(new_va_offset);
kernel_va_offset = new_va_offset;
+
+ /* Recalculate virtual addresses of initrd region */
+ if (initrd_start) {
+ initrd_start += new_va_offset;
+ initrd_end += new_va_offset;
+ }
+
memstart_addr = new_memstart_addr;
/* map all the memory banks */
--
1.7.9.5
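The arithmetic behind the fixup, as a standalone sketch (plain C, illustrative
only): a __va() result computed while PHYS_OFFSET was still biased towards the
kernel image mapping can be rebased by adding the difference between the old
and new physical offsets, which is exactly new_va_offset.

#include <stdint.h>

uint64_t rebase_early_va(uint64_t old_va, uint64_t old_phys_offset,
                         uint64_t new_phys_offset)
{
        /* __phys_to_virt(pa) = pa - PHYS_OFFSET + PAGE_OFFSET, so the same
         * physical address moves up by (old - new) once PHYS_OFFSET drops */
        return old_va + (old_phys_offset - new_phys_offset);
}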
* [PATCH] fixup! arm64: allow kernel Image to be loaded anywhere in physical memory
2015-05-22 5:43 ` [PATCH] fixup! arm64: allow kernel Image to be loaded anywhere in physical memory AKASHI Takahiro
@ 2015-05-22 6:34 ` Ard Biesheuvel
0 siblings, 0 replies; 13+ messages in thread
From: Ard Biesheuvel @ 2015-05-22 6:34 UTC
To: linux-arm-kernel
On 22 May 2015 at 07:43, AKASHI Takahiro <takahiro.akashi@linaro.org> wrote:
> Ard,
>
> While testing my kexec/dump code, I found that your patch doesn't work
> with an initrd. Since the virtual addresses of the initrd are calculated
> earlier than map_mem(), they hold bogus values and unpack_to_rootfs() will fail.
>
> This patch fixes the issue.
> Please consider folding it into your next version.
Thanks a lot! This series is obviously still under review, but I will
squash this into the next version if it is still appropriate by then.
And I will make sure to test with initrd as well :-)
Regards,
Ard.
> ---
> arch/arm64/mm/mmu.c | 8 ++++++++
> 1 file changed, 8 insertions(+)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 7e3e6af..43c2317 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -21,6 +21,7 @@
> #include <linux/kernel.h>
> #include <linux/errno.h>
> #include <linux/init.h>
> +#include <linux/initrd.h>
> #include <linux/libfdt.h>
> #include <linux/mman.h>
> #include <linux/nodemask.h>
> @@ -452,6 +453,13 @@ static void __init map_mem(void)
> bootstrap_linear_mapping(new_va_offset);
>
> kernel_va_offset = new_va_offset;
> +
> + /* Recalculate virtual addresses of initrd region */
> + if (initrd_start) {
> + initrd_start += new_va_offset;
> + initrd_end += new_va_offset;
> + }
> +
> memstart_addr = new_memstart_addr;
>
> /* map all the memory banks */
> --
> 1.7.9.5
>