From: santosh.shilimkar@ti.com (Santosh Shilimkar)
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH 7/8] ARM: mm: Recreate kernel mappings in early_paging_init()
Date: Fri, 21 Jun 2013 19:48:21 -0400 [thread overview]
Message-ID: <1371858502-10083-8-git-send-email-santosh.shilimkar@ti.com> (raw)
In-Reply-To: <1371858502-10083-1-git-send-email-santosh.shilimkar@ti.com>
This patch adds a step to the init sequence that recreates the kernel
code/data page table mappings prior to full paging initialization.
This is necessary on LPAE systems that run from a physical address
space above the 4G limit. For such systems, a machine descriptor hook
is provided so that PHYS_OFFSET can be overridden in a machine-specific
fashion.

Based on Cyril's initial patch. The pv_table needs to be patched again
after switching to the higher address space.
Cc: Nicolas Pitre <nico@linaro.org>
Cc: Russell King <linux@arm.linux.org.uk>
Signed-off-by: R Sricharan <r.sricharan@ti.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
---
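For illustration, a platform that boots from a low (sub-4G) alias of its RAM
and wants to run from an alias above 4G would hook the new init_meminfo
callback roughly as below. This is a minimal sketch: the board name and
addresses are made up, and it assumes the __pv_phys_offset/__pv_offset
variables introduced earlier in this series are visible through
<asm/memory.h>. The real wiring for Keystone is in patch 8/8.

/* Hypothetical board file -- names and addresses are illustrative only */
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/mach/arch.h>

#define BOARD_LOW_PHYS_START    0x80000000ULL   /* alias the bootloader runs from */
#define BOARD_HIGH_PHYS_START   0x800000000ULL  /* LPAE alias above 4G */

static void __init board_init_meminfo(void)
{
        /*
         * Re-point the patch-stub constants at the high alias.
         * early_paging_init() then re-runs fixup_pv_table() and
         * switches TTBR0/TTBR1 over to page tables built from the
         * new PHYS_OFFSET.  The low word of __pv_offset deliberately
         * wraps modulo 2^32; the sign-flag handling in
         * early_paging_init() copes with that.
         */
        __pv_phys_offset = BOARD_HIGH_PHYS_START;
        __pv_offset = (unsigned long)BOARD_HIGH_PHYS_START - PAGE_OFFSET;
}

DT_MACHINE_START(BOARD_HYPOTHETICAL, "Hypothetical LPAE board")
        /* ... .dt_compat, .map_io and friends as usual ... */
        .init_meminfo   = board_init_meminfo,
MACHINE_END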
arch/arm/include/asm/mach/arch.h | 1 +
arch/arm/kernel/setup.c | 3 ++
arch/arm/mm/mmu.c | 99 ++++++++++++++++++++++++++++++++++++++
3 files changed, 103 insertions(+)
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 308ad7d..e487b8e 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -43,6 +43,7 @@ struct machine_desc {
        struct smp_operations   *smp;           /* SMP operations */
        void                    (*fixup)(struct tag *, char **,
                                         struct meminfo *);
+       void                    (*init_meminfo)(void);
        void                    (*reserve)(void);       /* reserve mem blocks */
        void                    (*map_io)(void);        /* IO mapping function */
        void                    (*init_early)(void);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bdcd4dd..e2ebe6c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -71,6 +71,7 @@ static int __init fpe_setup(char *line)
__setup("fpe=", fpe_setup);
#endif
+extern void early_paging_init(struct machine_desc *, struct proc_info_list *);
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
@@ -789,6 +790,8 @@ void __init setup_arch(char **cmdline_p)
        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+       early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 280f91d..f8ef29b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -28,6 +28,7 @@
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
+#include <asm/procinfo.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -1264,6 +1265,104 @@ static void __init map_lowmem(void)
        }
 }

+#ifdef CONFIG_ARM_LPAE
+extern void fixup_pv_table(const void *, unsigned long, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
+extern const void *__pv_high_table_begin, *__pv_high_table_end;
+
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(struct machine_desc *mdesc,
+                              struct proc_info_list *procinfo)
+{
+       pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+       unsigned long map_start, map_end;
+       pgd_t *pgd0, *pgdk;
+       pud_t *pud0, *pudk;
+       pmd_t *pmd0, *pmdk;
+       phys_addr_t phys;
+       int i;
+       unsigned long __pv_phys_offset_low;
+
+       /* remap kernel code and data */
+       map_start = init_mm.start_code;
+       map_end = init_mm.brk;
+
+       /* get a handle on things... */
+       pgd0 = pgd_offset_k(0);
+       pud0 = pud_offset(pgd0, 0);
+       pmd0 = pmd_offset(pud0, 0);
+
+       pgdk = pgd_offset_k(map_start);
+       pudk = pud_offset(pgdk, map_start);
+       pmdk = pmd_offset(pudk, map_start);
+
+       phys = PHYS_OFFSET;
+
+       if (mdesc->init_meminfo) {
+               mdesc->init_meminfo();
+               /* Run the patch stub to update the constants */
+               fixup_pv_table(&__pv_table_begin,
+                       (&__pv_table_end - &__pv_table_begin) << 2,
+                       __pv_offset);
+
+               fixup_pv_table(&__pv_high_table_begin,
+                       (&__pv_high_table_end - &__pv_high_table_begin) << 2,
+                       __pv_phys_offset >> PV_HIGH_SHIFT);
+
+               /*
+                * Cache cleaning operations for self-modifying code
+                * We should clean the entries by MVA but running a
+                * for loop over every pv_table entry pointer would
+                * just complicate the code.
+                */
+               flush_cache_louis();
+               dsb();
+               isb();
+
+               /*
+                * Set the flag to indicate whether __pv_offset is
+                * real or 2's complement after high address switch
+                */
+               __pv_phys_offset_low = __pv_phys_offset;
+               if (__pv_phys_offset_low < PAGE_OFFSET)
+                       __pv_sign_flag = 1;
+               else
+                       __pv_sign_flag = 0;
+       }
+
+       /* remap level 1 table */
+       for (i = 0; i < PTRS_PER_PGD; i++) {
+               *pud0++ = __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER);
+               pmd0 += PTRS_PER_PMD;
+       }
+
+       /* remap pmds for kernel mapping */
+       phys = __pa(map_start) & PMD_MASK;
+       do {
+               *pmdk++ = __pmd(phys | pmdprot);
+               phys += PMD_SIZE;
+       } while (phys < map_end);
+
+       flush_cache_all();
+       cpu_set_ttbr(0, __pa(pgd0));
+       cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+       local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(struct machine_desc *mdesc,
+                              struct proc_info_list *procinfo)
+{
+       if (mdesc->init_meminfo)
+               mdesc->init_meminfo();
+}
+
+#endif
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
--
1.7.9.5
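As background for the "real or 2's complement" comment in the mmu.c hunk
above: the low word of the virt-to-phys translation is applied with a plain
32-bit add, which wraps modulo 2^32, so the same patched instruction works
whether PHYS_OFFSET sits below PAGE_OFFSET or above 4G; __pv_sign_flag only
records which case applies so that the 64-bit translation (patch 6/8) knows
how to treat the high word. A standalone sketch of that arithmetic, with
example addresses only (this is not the kernel's stub code):

/* build with: gcc -o pv_wrap pv_wrap.c -- illustration only */
#include <stdint.h>
#include <stdio.h>

/* The patched stub boils down to a 32-bit add for the low word. */
static uint32_t virt_to_phys_low(uint32_t virt, uint32_t pv_offset)
{
        return virt + pv_offset;        /* wraps modulo 2^32, like ARM 'add' */
}

int main(void)
{
        const uint32_t page_offset = 0xC0000000;        /* typical 3G/1G split */

        /* RAM alias below PAGE_OFFSET: offset is the 2's complement of -1G */
        uint32_t off_low  = 0x80000000 - page_offset;   /* 0xC0000000 */

        /* RAM alias above 4G: the low 32 bits of PHYS_OFFSET are zero */
        uint32_t off_high = 0x00000000 - page_offset;   /* 0x40000000 */

        printf("low alias:  %08x\n", virt_to_phys_low(page_offset, off_low));  /* 80000000 */
        printf("high alias: %08x\n", virt_to_phys_low(page_offset, off_high)); /* 00000000 */
        return 0;
}

The high word is zero before the switch and, afterwards, comes from the
separately patched __pv_phys_offset >> PV_HIGH_SHIFT constant shown in the
hunk above.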
Thread overview: 22+ messages
2013-06-21 23:48 [PATCH 0/8] ARM: mm: Extend the runtime patch stub for PAE systems Santosh Shilimkar
2013-06-21 23:48 ` [PATCH 1/8] ARM: mm: LPAE: use phys_addr_t appropriately in p2v and v2p conversions Santosh Shilimkar
2013-07-22 15:03 ` Nicolas Pitre
2013-06-21 23:48 ` [PATCH 2/8] ARM: mm: Introduce virt_to_idmap() with an arch hook Santosh Shilimkar
2013-06-21 23:48 ` [PATCH 3/8] ARM: mm: Move the idmap print to appropriate place in the code Santosh Shilimkar
2013-06-21 23:48 ` [PATCH 4/8] ARM: mm: Pass the constant as an argument to fixup_pv_table() Santosh Shilimkar
2013-06-21 23:48 ` [PATCH 5/8] ARM: mm: Add __pv_stub_mov to patch MOV instruction Santosh Shilimkar
2013-06-21 23:48 ` [PATCH 6/8] ARM: mm: LPAE: Correct virt_to_phys patching for 64 bit physical addresses Santosh Shilimkar
2013-07-24 1:10 ` Nicolas Pitre
2013-07-24 2:01 ` Santosh Shilimkar
2013-07-24 2:49 ` Nicolas Pitre
2013-07-24 11:50 ` Sricharan R
2013-07-24 12:07 ` Sricharan R
2013-07-24 14:04 ` Santosh Shilimkar
2013-07-24 20:21 ` Nicolas Pitre
2013-07-25 3:49 ` Sricharan R
2013-07-25 18:53 ` Santosh Shilimkar
2013-06-21 23:48 ` Santosh Shilimkar [this message]
2013-06-21 23:48 ` [PATCH 8/8] ARM: keystone: Switch over to high physical address range Santosh Shilimkar
2013-06-22 1:51 ` [PATCH 0/8] ARM: mm: Extend the runtime patch stub for PAE systems Nicolas Pitre
2013-06-22 2:17 ` Santosh Shilimkar
2013-07-16 18:42 ` Santosh Shilimkar