linux-arm-kernel.lists.infradead.org archive mirror
* [PATCH 0/4] generic early_ioremap support
@ 2013-11-28  2:44 Mark Salter
  2013-11-28  2:44 ` [PATCH 3/4] arm: add " Mark Salter
  2013-11-28  2:44 ` [PATCH 4/4] arm64: " Mark Salter
  0 siblings, 2 replies; 7+ messages in thread
From: Mark Salter @ 2013-11-28  2:44 UTC (permalink / raw)
  To: linux-arm-kernel

This patch series takes the common bits from the x86 early ioremap
implementation and creates a generic library which may be used by
other architectures. The early ioremap interfaces are intended for
situations where boot code needs to make temporary virtual mappings
before the normal ioremap interfaces are available. Typically, this
means before paging_init() has run.
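
For callers, the interface is analogous to ioremap()/iounmap(), except
that early_iounmap() also takes the mapping size. A minimal usage sketch
(the function name and the table being mapped are made up for
illustration; only early_ioremap()/early_iounmap() come from this
series):

  /* Sketch: map a boot-time table, read it, and tear the mapping down. */
  void __init read_boot_table(phys_addr_t table_phys)
  {
  	void __iomem *map;

  	/* usable before paging_init(), where ioremap() is not */
  	map = early_ioremap(table_phys, PAGE_SIZE);
  	if (!map)
  		return;

  	/* ... pull out whatever the caller needs ... */

  	/* mappings are temporary by design: always unmap when done */
  	early_iounmap(map, PAGE_SIZE);
  }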

These patches are layered on top of generic fixmap patches which
have been discussed here:

  http://lkml.org/lkml/2013/11/25/474

These patches (and the underlying fixmap patches) may be found at:

  git://github.com/mosalter/linux.git (early-ioremap branch)

Mark Salter (4):
  Create generic early_ioremap() support
  x86: use generic early_ioremap
  arm: add early_ioremap support
  arm64: add early_ioremap support

 Documentation/arm64/memory.txt      |   4 +-
 arch/arm/Kconfig                    |  11 ++
 arch/arm/include/asm/Kbuild         |   1 +
 arch/arm/include/asm/fixmap.h       |  18 +++
 arch/arm/include/asm/io.h           |   1 +
 arch/arm/kernel/setup.c             |   3 +
 arch/arm/mm/Makefile                |   1 +
 arch/arm/mm/early_ioremap.c         |  93 ++++++++++++++
 arch/arm/mm/mmu.c                   |   2 +
 arch/arm64/Kconfig                  |   1 +
 arch/arm64/include/asm/Kbuild       |   1 +
 arch/arm64/include/asm/fixmap.h     |  68 ++++++++++
 arch/arm64/include/asm/io.h         |   1 +
 arch/arm64/include/asm/memory.h     |   1 +
 arch/arm64/kernel/early_printk.c    |   8 +-
 arch/arm64/kernel/head.S            |   9 +-
 arch/arm64/kernel/setup.c           |   2 +
 arch/arm64/mm/ioremap.c             |  77 ++++++++++++
 arch/arm64/mm/mmu.c                 |  41 ------
 arch/x86/Kconfig                    |   1 +
 arch/x86/include/asm/Kbuild         |   1 +
 arch/x86/include/asm/fixmap.h       |   6 +
 arch/x86/include/asm/io.h           |  14 +--
 arch/x86/mm/ioremap.c               | 224 +--------------------------------
 arch/x86/mm/pgtable_32.c            |   2 +-
 include/asm-generic/early_ioremap.h |  40 ++++++
 lib/Kconfig                         |   3 +
 lib/Makefile                        |   1 +
 lib/early_ioremap.c                 | 243 ++++++++++++++++++++++++++++++++++++
 29 files changed, 592 insertions(+), 286 deletions(-)
 create mode 100644 arch/arm/mm/early_ioremap.c
 create mode 100644 arch/arm64/include/asm/fixmap.h
 create mode 100644 include/asm-generic/early_ioremap.h
 create mode 100644 lib/early_ioremap.c

-- 
1.8.3.1

* [PATCH 3/4] arm: add early_ioremap support
  2013-11-28  2:44 [PATCH 0/4] generic early_ioremap support Mark Salter
@ 2013-11-28  2:44 ` Mark Salter
  2013-11-28  2:44 ` [PATCH 4/4] arm64: " Mark Salter
  1 sibling, 0 replies; 7+ messages in thread
From: Mark Salter @ 2013-11-28  2:44 UTC (permalink / raw)
  To: linux-arm-kernel

This patch uses the generic early_ioremap code to implement
early_ioremap for ARM. The ARM-specific bits come mostly from
an earlier patch from Leif Lindholm <leif.lindholm@linaro.org>
here:

  https://lkml.org/lkml/2013/10/3/279

Signed-off-by: Mark Salter <msalter@redhat.com>
Tested-by: Leif Lindholm <leif.lindholm@linaro.org>
CC: Russell King <linux@arm.linux.org.uk>
CC: linux-arm-kernel@lists.infradead.org
---
 arch/arm/Kconfig              | 11 +++++
 arch/arm/include/asm/Kbuild   |  1 +
 arch/arm/include/asm/fixmap.h | 18 +++++++++
 arch/arm/include/asm/io.h     |  1 +
 arch/arm/kernel/setup.c       |  3 ++
 arch/arm/mm/Makefile          |  1 +
 arch/arm/mm/early_ioremap.c   | 93 +++++++++++++++++++++++++++++++++++++++++++
 arch/arm/mm/mmu.c             |  2 +
 8 files changed, 130 insertions(+)
 create mode 100644 arch/arm/mm/early_ioremap.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c1f1a7e..78a79a6a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1842,6 +1842,17 @@ config UACCESS_WITH_MEMCPY
 	  However, if the CPU data cache is using a write-allocate mode,
 	  this option is unlikely to provide any performance gain.
 
+config EARLY_IOREMAP
+	depends on MMU
+	bool "Provide early_ioremap() support for kernel initialization."
+	select GENERIC_EARLY_IOREMAP
+	help
+	  Provide a mechanism for kernel initialisation code to temporarily
+	  map, in a highmem-agnostic way, memory pages before ioremap()
+	  and friends are available (before paging_init() has run). It uses
+	  the same virtual memory range as kmap so all early mappings must
+	  be unmapped before paging_init() is called.
+
 config SECCOMP
 	bool
 	prompt "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index c38b58c..49ec506 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += cputime.h
 generic-y += current.h
+generic-y += early_ioremap.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 68ea615..e92b7a4 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -21,8 +21,26 @@ enum fixed_addresses {
 	FIX_KMAP_BEGIN,
 	FIX_KMAP_END = (FIXADDR_TOP - FIXADDR_START) >> PAGE_SHIFT,
 	__end_of_fixed_addresses
+/*
+ * 224 temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ *
+ * (P)re-using the FIXADDR region, which is used for highmem
+ * later on, and statically aligned to 1MB.
+ */
+#define NR_FIX_BTMAPS		32
+#define FIX_BTMAPS_SLOTS	7
+#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+#define FIX_BTMAP_END		FIX_KMAP_BEGIN
+#define FIX_BTMAP_BEGIN		(FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1)
 };
 
+#define FIXMAP_PAGE_NORMAL (L_PTE_MT_WRITEBACK | L_PTE_YOUNG | L_PTE_PRESENT)
+#define FIXMAP_PAGE_IO (L_PTE_MT_DEV_NONSHARED | L_PTE_YOUNG | L_PTE_PRESENT)
+
+extern void __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags);
+
 #include <asm-generic/fixmap.h>
 
 #endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3c597c2..131e0ba 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -28,6 +28,7 @@
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <asm/early_ioremap.h>
 #include <xen/xen.h>
 
 /*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a8..04c1757 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
+#include <asm/io.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
@@ -889,6 +890,8 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	early_ioremap_init();
+
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index ecfe6e5..fea855e 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -15,6 +15,7 @@ endif
 obj-$(CONFIG_MODULES)		+= proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
+obj-$(CONFIG_EARLY_IOREMAP)	+= early_ioremap.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
diff --git a/arch/arm/mm/early_ioremap.c b/arch/arm/mm/early_ioremap.c
new file mode 100644
index 0000000..c3e2bf2
--- /dev/null
+++ b/arch/arm/mm/early_ioremap.c
@@ -0,0 +1,93 @@
+/*
+ * early_ioremap() support for ARM
+ *
+ * Based on existing support in arch/x86/mm/ioremap.c
+ *
+ * Restrictions: currently only functional before paging_init()
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include <asm/mach/map.h>
+
+static pte_t bm_pte[PTRS_PER_PTE] __aligned(PTE_HWTABLE_SIZE) __initdata;
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+	unsigned int index = pgd_index(addr);
+	pgd_t *pgd = cpu_get_pgd() + index;
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+void __init early_ioremap_init(void)
+{
+	pmd_t *pmd;
+
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+
+	pmd_populate_kernel(NULL, pmd, bm_pte);
+
+	/*
+	 * Make sure we don't span multiple pmds.
+	 */
+	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+		WARN_ON(1);
+		pr_warn("pmd %p != %p\n",
+			pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+			fix_to_virt(FIX_BTMAP_BEGIN));
+		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
+			fix_to_virt(FIX_BTMAP_END));
+		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
+		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
+	}
+
+	early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *pte;
+	u64 desc;
+
+	if (idx > FIX_KMAP_END) {
+		BUG();
+		return;
+	}
+	pte = early_ioremap_pte(addr);
+
+	if (pgprot_val(flags))
+		set_pte_at(NULL, 0xfff00000, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, flags));
+	else
+		pte_clear(NULL, addr, pte);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	desc = *pte;
+}
+
+void __init
+early_ioremap_shutdown(void)
+{
+	pmd_t *pmd;
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+	pmd_clear(pmd);
+}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 580ef2d..bef59b9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
+#include <asm/early_ioremap.h>
 
 #include "mm.h"
 #include "tcm.h"
@@ -1405,6 +1406,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
+	early_ioremap_reset();
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
-- 
1.8.3.1

* [PATCH 4/4] arm64: add early_ioremap support
  2013-11-28  2:44 [PATCH 0/4] generic early_ioremap support Mark Salter
  2013-11-28  2:44 ` [PATCH 3/4] arm: add " Mark Salter
@ 2013-11-28  2:44 ` Mark Salter
  2013-12-05 16:28   ` Catalin Marinas
  1 sibling, 1 reply; 7+ messages in thread
From: Mark Salter @ 2013-11-28  2:44 UTC (permalink / raw)
  To: linux-arm-kernel

Signed-off-by: Mark Salter <msalter@redhat.com>
CC: Catalin Marinas <catalin.marinas@arm.com>
CC: Will Deacon <will.deacon@arm.com>
CC: Rob Landley <rob@landley.net>
CC: linux-arm-kernel@lists.infradead.org
CC: linux-doc@vger.kernel.org
---
 Documentation/arm64/memory.txt   |  4 +--
 arch/arm64/Kconfig               |  1 +
 arch/arm64/include/asm/Kbuild    |  1 +
 arch/arm64/include/asm/fixmap.h  | 68 +++++++++++++++++++++++++++++++++++
 arch/arm64/include/asm/io.h      |  1 +
 arch/arm64/include/asm/memory.h  |  1 +
 arch/arm64/kernel/early_printk.c |  8 +++--
 arch/arm64/kernel/head.S         |  9 ++---
 arch/arm64/kernel/setup.c        |  2 ++
 arch/arm64/mm/ioremap.c          | 77 ++++++++++++++++++++++++++++++++++++++++
 arch/arm64/mm/mmu.c              | 41 ---------------------
 11 files changed, 162 insertions(+), 51 deletions(-)
 create mode 100644 arch/arm64/include/asm/fixmap.h

diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index 5e054bf..953c81e 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -35,7 +35,7 @@ ffffffbc00000000	ffffffbdffffffff	   8GB		vmemmap
 
 ffffffbe00000000	ffffffbffbbfffff	  ~8GB		[guard, future vmmemap]
 
-ffffffbffbc00000	ffffffbffbdfffff	   2MB		earlyprintk device
+ffffffbffbc00000	ffffffbffbdfffff	   2MB		fixed mappings
 
 ffffffbffbe00000	ffffffbffbe0ffff	  64KB		PCI I/O space
 
@@ -60,7 +60,7 @@ fffffdfc00000000	fffffdfdffffffff	   8GB		vmemmap
 
 fffffdfe00000000	fffffdfffbbfffff	  ~8GB		[guard, future vmmemap]
 
-fffffdfffbc00000	fffffdfffbdfffff	   2MB		earlyprintk device
+fffffdfffbc00000	fffffdfffbdfffff	   2MB		fixed mappings
 
 fffffdfffbe00000	fffffdfffbe0ffff	  64KB		PCI I/O space
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 88c8b6c1..809c1b8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -12,6 +12,7 @@ config ARM64
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 519f89f..b7f99a3 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -10,6 +10,7 @@ generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
 generic-y += emergency-restart.h
+generic-y += early_ioremap.h
 generic-y += errno.h
 generic-y += ftrace.h
 generic-y += hw_irq.h
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
new file mode 100644
index 0000000..a4b193d
--- /dev/null
+++ b/arch/arm64/include/asm/fixmap.h
@@ -0,0 +1,68 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
+ *
+ * Adapted from arch/x86_64 version.
+ *
+ */
+
+#ifndef _ASM_ARM64_FIXMAP_H
+#define _ASM_ARM64_FIXMAP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * page-sized. Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ */
+enum fixed_addresses {
+	FIX_EARLYCON,
+	__end_of_permanent_fixed_addresses,
+
+	/*
+	 * Temporary boot-time mappings, used by early_ioremap(),
+	 * before ioremap() is functional.
+	 */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define NR_FIX_BTMAPS		4
+#else
+#define NR_FIX_BTMAPS		64
+#endif
+#define FIX_BTMAPS_SLOTS	7
+#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+	__end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
+
+#define FIXMAP_PAGE_NORMAL __pgprot(PROT_NORMAL | PTE_PXN | PTE_UXN)
+#define FIXMAP_PAGE_IO     __pgprot(PROT_DEVICE_nGnRE)
+
+extern void __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags);
+
+#define __set_fixmap __early_set_fixmap
+
+#include <asm-generic/fixmap.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_ARM64_FIXMAP_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4cc813e..8fb2152 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -27,6 +27,7 @@
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <asm/pgtable.h>
+#include <asm/early_ioremap.h>
 
 #include <xen/xen.h>
 
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3776217..4a6d7ec 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -50,6 +50,7 @@
 #define MODULES_END		(PAGE_OFFSET)
 #define MODULES_VADDR		(MODULES_END - SZ_64M)
 #define EARLYCON_IOBASE		(MODULES_VADDR - SZ_4M)
+#define FIXADDR_TOP		(MODULES_VADDR - SZ_2M - PAGE_SIZE)
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index fbb6e18..850d9a4 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -26,6 +26,8 @@
 #include <linux/amba/serial.h>
 #include <linux/serial_reg.h>
 
+#include <asm/fixmap.h>
+
 static void __iomem *early_base;
 static void (*printch)(char ch);
 
@@ -141,8 +143,10 @@ static int __init setup_early_printk(char *buf)
 	}
 	/* no options parsing yet */
 
-	if (paddr)
-		early_base = early_io_map(paddr, EARLYCON_IOBASE);
+	if (paddr) {
+		set_fixmap_io(FIX_EARLYCON, paddr);
+		early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON);
+	}
 
 	printch = match->printch;
 	early_console = &early_console_dev;
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7009387..03adf8f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -411,7 +411,7 @@ ENDPROC(__calc_phys_offset)
  *   - identity mapping to enable the MMU (low address, TTBR0)
  *   - first few MB of the kernel linear mapping to jump to once the MMU has
  *     been enabled, including the FDT blob (TTBR1)
- *   - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
+ *   - pgd entry for fixed mappings (TTBR1)
  */
 __create_page_tables:
 	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
@@ -464,15 +464,12 @@ __create_page_tables:
 	sub	x6, x6, #1			// inclusive range
 	create_block_map x0, x7, x3, x5, x6
 1:
-#ifdef CONFIG_EARLY_PRINTK
 	/*
-	 * Create the pgd entry for the UART mapping. The full mapping is done
-	 * later based earlyprintk kernel parameter.
+	 * Create the pgd entry for the fixed mappings.
 	 */
-	ldr	x5, =EARLYCON_IOBASE		// UART virtual address
+	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
 	add	x0, x26, #2 * PAGE_SIZE		// section table address
 	create_pgd_entry x26, x0, x5, x6, x7
-#endif
 	ret
 ENDPROC(__create_page_tables)
 	.ltorg
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0bc5e4c..790871a 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -42,6 +42,7 @@
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 
+#include <asm/fixmap.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/cputable.h>
@@ -216,6 +217,7 @@ void __init setup_arch(char **cmdline_p)
 
 	*cmdline_p = boot_command_line;
 
+	early_ioremap_init();
 	parse_early_param();
 
 	arm64_memblock_init();
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 2bb1d58..fb338ab 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -25,6 +25,10 @@
 #include <linux/vmalloc.h>
 #include <linux/io.h>
 
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
 static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
 				      pgprot_t prot, void *caller)
 {
@@ -98,3 +102,76 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#endif
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+	pmd_t *pmd = early_ioremap_pmd(addr);
+	return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_ioremap_init(void)
+{
+	pmd_t *pmd;
+
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+#ifndef CONFIG_ARM64_64K_PAGES
+	/* need to populate pmd for 4k pagesize only */
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+#endif
+	/*
+	 * The boot-ioremap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+		WARN_ON(1);
+		pr_warn("pmd %p != %p\n",
+			pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+			fix_to_virt(FIX_BTMAP_BEGIN));
+		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
+			fix_to_virt(FIX_BTMAP_END));
+
+		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
+		pr_warn("FIX_BTMAP_BEGIN:     %d\n",
+			FIX_BTMAP_BEGIN);
+	}
+
+	early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *pte;
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	pte = early_ioremap_pte(addr);
+
+	if (pgprot_val(flags))
+		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+	else {
+		pte_clear(&init_mm, addr, pte);
+		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+	}
+}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebb..9849f7f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -252,47 +252,6 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 	} while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Create an early I/O mapping using the pgd/pmd entries already populated
- * in head.S as this function is called too early to allocated any memory. The
- * mapping size is 2MB with 4KB pages or 64KB or 64KB pages.
- */
-void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
-{
-	unsigned long size, mask;
-	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	/*
-	 * No early pte entries with !ARM64_64K_PAGES configuration, so using
-	 * sections (pmd).
-	 */
-	size = page64k ? PAGE_SIZE : SECTION_SIZE;
-	mask = ~(size - 1);
-
-	pgd = pgd_offset_k(virt);
-	pud = pud_offset(pgd, virt);
-	if (pud_none(*pud))
-		return NULL;
-	pmd = pmd_offset(pud, virt);
-
-	if (page64k) {
-		if (pmd_none(*pmd))
-			return NULL;
-		pte = pte_offset_kernel(pmd, virt);
-		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
-	} else {
-		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
-	}
-
-	return (void __iomem *)((virt & mask) + (phys & ~mask));
-}
-#endif
-
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
-- 
1.8.3.1

* [PATCH 4/4] arm64: add early_ioremap support
  2013-11-28  2:44 ` [PATCH 4/4] arm64: " Mark Salter
@ 2013-12-05 16:28   ` Catalin Marinas
  2013-12-06 17:20     ` Mark Salter
  2013-12-17 19:15     ` Mark Salter
  0 siblings, 2 replies; 7+ messages in thread
From: Catalin Marinas @ 2013-12-05 16:28 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Nov 28, 2013 at 02:44:39AM +0000, Mark Salter wrote:
> --- /dev/null
> +++ b/arch/arm64/include/asm/fixmap.h
> @@ -0,0 +1,68 @@
> +/*
> + * fixmap.h: compile-time virtual memory allocation
> + *
> + * This file is subject to the terms and conditions of the GNU General Public
> + * License.  See the file "COPYING" in the main directory of this archive
> + * for more details.
> + *
> + * Copyright (C) 1998 Ingo Molnar
> + * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
> + *
> + * Adapted from arch/x86_64 version.
> + *
> + */
> +
> +#ifndef _ASM_ARM64_FIXMAP_H
> +#define _ASM_ARM64_FIXMAP_H
> +
> +#ifndef __ASSEMBLY__
> +#include <linux/kernel.h>
> +#include <asm/page.h>
> +
> +/*
> + * Here we define all the compile-time 'special' virtual
> + * addresses. The point is to have a constant address at
> + * compile time, but to set the physical address only
> + * in the boot process.
> + *
> + * These 'compile-time allocated' memory buffers are
> + * page-sized. Use set_fixmap(idx,phys) to associate
> + * physical memory with fixmap indices.
> + *
> + */
> +enum fixed_addresses {
> +	FIX_EARLYCON,
> +	__end_of_permanent_fixed_addresses,
> +
> +	/*
> +	 * Temporary boot-time mappings, used by early_ioremap(),
> +	 * before ioremap() is functional.
> +	 */

How temporary are these mappings? The early console may not be disabled
at run-time, so it still needs the mapping.

> +#ifdef CONFIG_ARM64_64K_PAGES
> +#define NR_FIX_BTMAPS		4
> +#else
> +#define NR_FIX_BTMAPS		64
> +#endif
> +#define FIX_BTMAPS_SLOTS	7
> +#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
> +
> +	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
> +	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
> +	__end_of_fixed_addresses
> +};
> +
> +#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
> +#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
> +
> +#define FIXMAP_PAGE_NORMAL __pgprot(PROT_NORMAL | PTE_PXN | PTE_UXN)

I'll push a fix to change PROT_DEFAULT to (pgprot_default | PTE_DIRTY).

> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
> index 3776217..4a6d7ec 100644
> --- a/arch/arm64/include/asm/memory.h
> +++ b/arch/arm64/include/asm/memory.h
> @@ -50,6 +50,7 @@
>  #define MODULES_END		(PAGE_OFFSET)
>  #define MODULES_VADDR		(MODULES_END - SZ_64M)
>  #define EARLYCON_IOBASE		(MODULES_VADDR - SZ_4M)
> +#define FIXADDR_TOP		(MODULES_VADDR - SZ_2M - PAGE_SIZE)
>  #define TASK_SIZE_64		(UL(1) << VA_BITS)

Can we remove EARLYCON_IOBASE?

> --- a/arch/arm64/mm/ioremap.c
> +++ b/arch/arm64/mm/ioremap.c
> @@ -25,6 +25,10 @@
>  #include <linux/vmalloc.h>
>  #include <linux/io.h>
>  
> +#include <asm/fixmap.h>
> +#include <asm/tlbflush.h>
> +#include <asm/pgalloc.h>
> +
>  static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
>  				      pgprot_t prot, void *caller)
>  {
> @@ -98,3 +102,76 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
>  				__builtin_return_address(0));
>  }
>  EXPORT_SYMBOL(ioremap_cache);
> +
> +#ifndef CONFIG_ARM64_64K_PAGES
> +static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
> +#endif
> +
> +static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
> +{
> +	pgd_t *pgd = pgd_offset_k(addr);
> +	pud_t *pud = pud_offset(pgd, addr);
> +	pmd_t *pmd = pmd_offset(pud, addr);
> +
> +	return pmd;
> +}
> +
> +static inline pte_t * __init early_ioremap_pte(unsigned long addr)
> +{
> +	pmd_t *pmd = early_ioremap_pmd(addr);
> +	return pte_offset_kernel(pmd, addr);
> +}
> +
> +void __init early_ioremap_init(void)
> +{
> +	pmd_t *pmd;
> +
> +	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
> +#ifndef CONFIG_ARM64_64K_PAGES
> +	/* need to populate pmd for 4k pagesize only */
> +	pmd_populate_kernel(&init_mm, pmd, bm_pte);
> +#endif

Can we use some of the standard pmd_none() etc. checks which would be
eliminated for 2-level page tables?
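
Roughly (just a sketch; bm_pte would then need to be defined for both
page sizes):

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, bm_pte);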

-- 
Catalin

* [PATCH 4/4] arm64: add early_ioremap support
  2013-12-05 16:28   ` Catalin Marinas
@ 2013-12-06 17:20     ` Mark Salter
  2013-12-16 14:42       ` Catalin Marinas
  2013-12-17 19:15     ` Mark Salter
  1 sibling, 1 reply; 7+ messages in thread
From: Mark Salter @ 2013-12-06 17:20 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, 2013-12-05 at 16:28 +0000, Catalin Marinas wrote:
> On Thu, Nov 28, 2013 at 02:44:39AM +0000, Mark Salter wrote:
> > + * These 'compile-time allocated' memory buffers are
> > + * page-sized. Use set_fixmap(idx,phys) to associate
> > + * physical memory with fixmap indices.
> > + *
> > + */
> > +enum fixed_addresses {
> > +	FIX_EARLYCON,
> > +	__end_of_permanent_fixed_addresses,
> > +
> > +	/*
> > +	 * Temporary boot-time mappings, used by early_ioremap(),
> > +	 * before ioremap() is functional.
> > +	 */
> 
> How temporary are these mappings? The early console may not be disabled
> at run-time, so it still needs the mapping.

It varies by arch, but we have flexibility on arm64 because there is a
dedicated pmd which stays around forever. So, you see the FIX_EARLYCON
above is a "permanent" mapping which isn't really an early_ioremap
mapping. The earlyprintk code uses set_fixmap_io. I suppose this could
have been broken up into two patches, one fixmap, and one early_ioremap.
To answer your concern, the earlyprintk mapping doesn't go away. The
early_ioremap mappings should be temporary and there's a checker for
that which is run at late_initcall time.
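
(That checker is just a late_initcall which warns if any btmap slot is
still mapped; roughly, following the x86 code this series generalises:)

	static int __init check_early_ioremap_leak(void)
	{
		int count = 0;
		int i;

		/* prev_map[] records the slots handed out by early_ioremap() */
		for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
			if (prev_map[i])
				count++;

		if (WARN(count, "early ioremap leak of %d areas detected\n", count))
			return 1;
		return 0;
	}
	late_initcall(check_early_ioremap_leak);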

> 
> > +#ifdef CONFIG_ARM64_64K_PAGES
> > +#define NR_FIX_BTMAPS		4
> > +#else
> > +#define NR_FIX_BTMAPS		64
> > +#endif
> > +#define FIX_BTMAPS_SLOTS	7
> > +#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
> > +
> > +	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
> > +	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
> > +	__end_of_fixed_addresses
> > +};
> > +
> > +#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
> > +#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
> > +
> > +#define FIXMAP_PAGE_NORMAL __pgprot(PROT_NORMAL | PTE_PXN | PTE_UXN)
> 
> I'll push a fix to change PROT_DEFAULT to (pgprot_default | PTE_DIRTY).

okay

> 
> > diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
> > index 3776217..4a6d7ec 100644
> > --- a/arch/arm64/include/asm/memory.h
> > +++ b/arch/arm64/include/asm/memory.h
> > @@ -50,6 +50,7 @@
> >  #define MODULES_END		(PAGE_OFFSET)
> >  #define MODULES_VADDR		(MODULES_END - SZ_64M)
> >  #define EARLYCON_IOBASE		(MODULES_VADDR - SZ_4M)
> > +#define FIXADDR_TOP		(MODULES_VADDR - SZ_2M - PAGE_SIZE)
> >  #define TASK_SIZE_64		(UL(1) << VA_BITS)
> 
> Can we remove EARLYCON_IOBASE?

Yes. I had it out in an earlier local patch, but it snuck back in.

> 
> > --- a/arch/arm64/mm/ioremap.c
> > +++ b/arch/arm64/mm/ioremap.c
> > @@ -25,6 +25,10 @@
> >  #include <linux/vmalloc.h>
> >  #include <linux/io.h>
> >  
> > +#include <asm/fixmap.h>
> > +#include <asm/tlbflush.h>
> > +#include <asm/pgalloc.h>
> > +
> >  static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
> >  				      pgprot_t prot, void *caller)
> >  {
> > @@ -98,3 +102,76 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
> >  				__builtin_return_address(0));
> >  }
> >  EXPORT_SYMBOL(ioremap_cache);
> > +
> > +#ifndef CONFIG_ARM64_64K_PAGES
> > +static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
> > +#endif
> > +
> > +static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
> > +{
> > +	pgd_t *pgd = pgd_offset_k(addr);
> > +	pud_t *pud = pud_offset(pgd, addr);
> > +	pmd_t *pmd = pmd_offset(pud, addr);
> > +
> > +	return pmd;
> > +}
> > +
> > +static inline pte_t * __init early_ioremap_pte(unsigned long addr)
> > +{
> > +	pmd_t *pmd = early_ioremap_pmd(addr);
> > +	return pte_offset_kernel(pmd, addr);
> > +}
> > +
> > +void __init early_ioremap_init(void)
> > +{
> > +	pmd_t *pmd;
> > +
> > +	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
> > +#ifndef CONFIG_ARM64_64K_PAGES
> > +	/* need to populate pmd for 4k pagesize only */
> > +	pmd_populate_kernel(&init_mm, pmd, bm_pte);
> > +#endif
> 
> Can we use some of the standard pmd_none() etc. checks which would be
> eliminated for 2-level page tables?
> 

Probably. I'll look into it.

* [PATCH 4/4] arm64: add early_ioremap support
  2013-12-06 17:20     ` Mark Salter
@ 2013-12-16 14:42       ` Catalin Marinas
  0 siblings, 0 replies; 7+ messages in thread
From: Catalin Marinas @ 2013-12-16 14:42 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Dec 06, 2013 at 05:20:49PM +0000, Mark Salter wrote:
> On Thu, 2013-12-05 at 16:28 +0000, Catalin Marinas wrote:
> > On Thu, Nov 28, 2013 at 02:44:39AM +0000, Mark Salter wrote:
> > > + * These 'compile-time allocated' memory buffers are
> > > + * page-sized. Use set_fixmap(idx,phys) to associate
> > > + * physical memory with fixmap indices.
> > > + *
> > > + */
> > > +enum fixed_addresses {
> > > +	FIX_EARLYCON,
> > > +	__end_of_permanent_fixed_addresses,
> > > +
> > > +	/*
> > > +	 * Temporary boot-time mappings, used by early_ioremap(),
> > > +	 * before ioremap() is functional.
> > > +	 */
> > 
> > How temporary are these mappings? The early console may not be disabled
> > at run-time, so it still needs the mapping.
> 
> It varies by arch, but we have flexibility on arm64 because there is a
> dedicated pmd which stays around forever. So, you see the FIX_EARLYCON
> above is a "permanent" mapping which isn't really an early_ioremap
> mapping. The earlyprintk code uses set_fixmap_io. I suppose this could
> have been broken up into two patches, one fixmap, and one early_ioremap.
> To answer your concern, the earlyprintk mapping doesn't go away. The
> early_ioremap mappings should be temporary and there's a checker for
> that which is run at late_initcall time.

OK, thanks for the clarification. I don't think it's worth splitting
the patch.

-- 
Catalin

* [PATCH 4/4] arm64: add early_ioremap support
  2013-12-05 16:28   ` Catalin Marinas
  2013-12-06 17:20     ` Mark Salter
@ 2013-12-17 19:15     ` Mark Salter
  1 sibling, 0 replies; 7+ messages in thread
From: Mark Salter @ 2013-12-17 19:15 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, 2013-12-05 at 16:28 +0000, Catalin Marinas wrote:
> > +#define FIXMAP_PAGE_NORMAL __pgprot(PROT_NORMAL | PTE_PXN | PTE_UXN)
> 
> I'll push a fix to change PROT_DEFAULT to (pgprot_default | PTE_DIRTY).
> 

This doesn't help early_ioremap because pgprot_default gets set up in
init_mem_pgprot(), which is called from paging_init(). The early_ioremap
calls happen before paging_init(). Would it be okay to make
init_mem_pgprot() non-static and call it from setup_arch() before
early_ioremap_init()?
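
Something like this ordering in setup_arch() (sketch only; whether
init_mem_pgprot() is safe to run this early is exactly the question):

	void __init setup_arch(char **cmdline_p)
	{
		/* ... */
		init_mem_pgprot();	/* made non-static; sets up pgprot_default */
		early_ioremap_init();	/* fixmap protections are now valid */
		parse_early_param();
		/* ... rest unchanged, paging_init() comes later ... */
	}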

--Mark
