linux-arm-kernel.lists.infradead.org archive mirror
* [RFC PATCHv3 06/11] arm: use is_vmalloc_addr
       [not found] <1388699609-18214-1-git-send-email-lauraa@codeaurora.org>
@ 2014-01-02 21:53 ` Laura Abbott
  2014-01-02 22:13   ` Dave Hansen
  2014-01-02 21:53 ` [RFC PATCHv3 07/11] arm: mm: Add iotable_init_novmreserve Laura Abbott
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 5+ messages in thread
From: Laura Abbott @ 2014-01-02 21:53 UTC (permalink / raw)
  To: linux-arm-kernel

is_vmalloc_addr already does the range checking against VMALLOC_START and
VMALLOC_END. Use it.
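
For reference, is_vmalloc_addr() in include/linux/mm.h reduces to
roughly the following in the CONFIG_MMU case (the !MMU variant just
returns 0):

	static inline int is_vmalloc_addr(const void *x)
	{
		unsigned long addr = (unsigned long)x;

		return addr >= VMALLOC_START && addr < VMALLOC_END;
	}

so the open-coded comparison removed below is exactly this check.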

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
---
 arch/arm/mm/iomap.c |    3 +--
 1 files changed, 1 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index 4614208..4bf5457 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -34,8 +34,7 @@ EXPORT_SYMBOL(pcibios_min_mem);
 
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
-	if ((unsigned long)addr >= VMALLOC_START &&
-	    (unsigned long)addr < VMALLOC_END)
+	if (is_vmalloc_addr(addr))
 		iounmap(addr);
 }
 EXPORT_SYMBOL(pci_iounmap);
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation

* [RFC PATCHv3 07/11] arm: mm: Add iotable_init_novmreserve
       [not found] <1388699609-18214-1-git-send-email-lauraa@codeaurora.org>
  2014-01-02 21:53 ` [RFC PATCHv3 06/11] arm: use is_vmalloc_addr Laura Abbott
@ 2014-01-02 21:53 ` Laura Abbott
  2014-01-02 21:53 ` [RFC PATCHv3 09/11] arm: mm: Track lowmem in vmalloc Laura Abbott
  2014-01-02 21:53 ` [RFC PATCHv3 10/11] arm: Use for_each_potential_vmalloc_area Laura Abbott
  3 siblings, 0 replies; 5+ messages in thread
From: Laura Abbott @ 2014-01-02 21:53 UTC (permalink / raw)
  To: linux-arm-kernel

iotable_init is currently used by dma_contiguous_remap to remap
CMA memory appropriately. This has the side effect of reserving
the CMA area in the vmalloc tracking structures. That is fine
under normal circumstances, but it creates conflicts if we want
to track lowmem in vmalloc. Since dma_contiguous_remap is only
concerned with the remapping, introduce iotable_init_novmreserve
to allow remapping pages without reserving the virtual address
range in vmalloc space.
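
A minimal caller sketch (paddr, vaddr and size are placeholders for
illustration, not values taken from this series):

	struct map_desc map;

	map.pfn = __phys_to_pfn(paddr);
	map.virtual = (unsigned long)vaddr;
	map.length = size;
	map.type = MT_MEMORY;
	iotable_init_novmreserve(&map, 1);

The mapping is created exactly as iotable_init would create it, but the
area is never handed to vm_area_add_early(), so the virtual range is
not reserved in the early vmalloc tracking.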

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
---
 arch/arm/include/asm/mach/map.h |    2 ++
 arch/arm/mm/dma-mapping.c       |    2 +-
 arch/arm/mm/ioremap.c           |    5 +++--
 arch/arm/mm/mm.h                |    2 +-
 arch/arm/mm/mmu.c               |   17 ++++++++++++++---
 5 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..02e3509 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -37,6 +37,7 @@ struct map_desc {
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
+extern void iotable_init_novmreserve(struct map_desc *, int);
 extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller);
 
@@ -56,6 +57,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys,
 			const struct mem_type *mtype);
 #else
 #define iotable_init(map,num)	do { } while (0)
+#define iotable_init_novmreserve(map,num)	do { } while (0)
 #define vm_reserve_area_early(a,s,c)	do { } while (0)
 #endif
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f61a570..c4c9f4b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -470,7 +470,7 @@ void __init dma_contiguous_remap(void)
 		     addr += PMD_SIZE)
 			pmd_clear(pmd_off_k(addr));
 
-		iotable_init(&map, 1);
+		iotable_init_novmreserve(&map, 1);
 	}
 }
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index f123d6e..ad92d4f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -84,14 +84,15 @@ struct static_vm *find_static_vm_vaddr(void *vaddr)
 	return NULL;
 }
 
-void __init add_static_vm_early(struct static_vm *svm)
+void __init add_static_vm_early(struct static_vm *svm, bool add_to_vm)
 {
 	struct static_vm *curr_svm;
 	struct vm_struct *vm;
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (add_to_vm)
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
 
 	list_for_each_entry(curr_svm, &static_vmlist, list) {
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d..6f9d28b 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -75,7 +75,7 @@ struct static_vm {
 
 extern struct list_head static_vmlist;
 extern struct static_vm *find_static_vm_vaddr(void *vaddr);
-extern __init void add_static_vm_early(struct static_vm *svm);
+extern __init void add_static_vm_early(struct static_vm *svm, bool add_to_vm);
 
 #endif
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 580ef2d..5450b43 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -819,7 +819,8 @@ static void __init create_mapping(struct map_desc *md)
 /*
  * Create the architecture specific mappings
  */
-void __init iotable_init(struct map_desc *io_desc, int nr)
+static void __init __iotable_init(struct map_desc *io_desc, int nr,
+					bool add_to_vm)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
@@ -840,10 +841,20 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		add_static_vm_early(svm++);
+		add_static_vm_early(svm++, add_to_vm);
 	}
 }
 
+void __init iotable_init(struct map_desc *io_desc, int nr)
+{
+	__iotable_init(io_desc, nr, true);
+}
+
+void __init iotable_init_novmreserve(struct map_desc *io_desc, int nr)
+{
+	__iotable_init(io_desc, nr, false);
+}
+
 void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
@@ -857,7 +868,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	add_static_vm_early(svm);
+	add_static_vm_early(svm, true);
 }
 
 #ifndef CONFIG_ARM_LPAE
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation

* [RFC PATCHv3 09/11] arm: mm: Track lowmem in vmalloc
       [not found] <1388699609-18214-1-git-send-email-lauraa@codeaurora.org>
  2014-01-02 21:53 ` [RFC PATCHv3 06/11] arm: use is_vmalloc_addr Laura Abbott
  2014-01-02 21:53 ` [RFC PATCHv3 07/11] arm: mm: Add iotable_init_novmreserve Laura Abbott
@ 2014-01-02 21:53 ` Laura Abbott
  2014-01-02 21:53 ` [RFC PATCHv3 10/11] arm: Use for_each_potential_vmalloc_area Laura Abbott
  3 siblings, 0 replies; 5+ messages in thread
From: Laura Abbott @ 2014-01-02 21:53 UTC (permalink / raw)
  To: linux-arm-kernel

Rather than always keeping lowmem and vmalloc separate, we can now
allow the two to be mixed. This means that all lowmem areas need to
be explicitly tracked in vmalloc to avoid over-allocating.
Additionally, adjust the vmalloc reserve to account for the fact
that there may be a hole in the middle of lowmem that is used as
vmalloc space.
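
As an illustrative example (the bank layout is hypothetical), the
adjustment in sanity_check_meminfo amounts to:

	hole_start = bank[0].start + bank[0].size; /* e.g. PHYS_OFFSET + SZ_256M */
	hole_size  = bank[1].start - hole_start;   /* e.g. SZ_128M */
	vmalloc_min += hole_size;	/* only if hole_start <= MAX_HOLE_ADDRESS */

i.e. the hole between the banks supplies vmalloc space, so the
traditional vmalloc region at the top of the address space shrinks by
the hole size and the lowmem limit rises by the same amount.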

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
---
 arch/arm/Kconfig   |    3 +
 arch/arm/mm/init.c |  104 ++++++++++++++++++++++++++++++++++++----------------
 arch/arm/mm/mm.h   |    1 +
 arch/arm/mm/mmu.c  |   29 ++++++++++++++
 4 files changed, 105 insertions(+), 32 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c1f1a7e..fc7aef2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -277,6 +277,9 @@ config GENERIC_BUG
 	def_bool y
 	depends on BUG
 
+config ARCH_TRACKS_VMALLOC
+	bool
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1f7b19a..ddfab22 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -574,6 +574,46 @@ static void __init free_highpages(void)
 #endif
 }
 
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
+
+#ifdef CONFIG_ARCH_TRACKS_VMALLOC
+void print_vmalloc_lowmem_info(void)
+{
+	int i;
+	void *va_start, *va_end;
+
+	printk(KERN_NOTICE
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+		MLM(VMALLOC_START, VMALLOC_END));
+
+	for (i = meminfo.nr_banks - 1; i >= 0; i--) {
+		if (!meminfo.bank[i].highmem) {
+			va_start = __va(meminfo.bank[i].start);
+			va_end = __va(meminfo.bank[i].start +
+						meminfo.bank[i].size);
+			printk(KERN_NOTICE
+			 "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+			MLM((unsigned long)va_start, (unsigned long)va_end));
+		}
+		if (i && ((meminfo.bank[i-1].start + meminfo.bank[i-1].size) !=
+			   meminfo.bank[i].start)) {
+			if (meminfo.bank[i-1].start + meminfo.bank[i-1].size
+				   <= MAX_HOLE_ADDRESS) {
+				va_start = __va(meminfo.bank[i-1].start
+						+ meminfo.bank[i-1].size);
+				va_end = __va(meminfo.bank[i].start);
+				printk(KERN_NOTICE
+				"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+					   MLM((unsigned long)va_start,
+						   (unsigned long)va_end));
+			}
+		}
+	}
+}
+#endif
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -602,55 +642,52 @@ void __init mem_init(void)
 
 	mem_init_print_info(NULL);
 
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
 	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
 			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HAVE_TCM
 			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
-			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
-			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
-			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#endif
-			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
-			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
-			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
-			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
-
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n",
 			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
 				(PAGE_SIZE)),
 #ifdef CONFIG_HAVE_TCM
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_TOP),
-			MLM(VMALLOC_START, VMALLOC_END),
-			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+			MLK(FIXADDR_START, FIXADDR_TOP));
+#ifdef CONFIG_ARCH_TRACKS_VMALLOC
+	print_vmalloc_lowmem_info();
+#else
+	printk(KERN_NOTICE
+		   "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		   "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
+		   MLM(VMALLOC_START, VMALLOC_END),
+		   MLM(PAGE_OFFSET, (unsigned long)high_memory));
+#endif
+	printk(KERN_NOTICE
 #ifdef CONFIG_HIGHMEM
-			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+		   "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+		   "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+		   "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+		   "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+		   "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
+		   "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
+#ifdef CONFIG_HIGHMEM
+		   MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
 				(PAGE_SIZE)),
 #endif
 #ifdef CONFIG_MODULES
-			MLM(MODULES_VADDR, MODULES_END),
+		   MLM(MODULES_VADDR, MODULES_END),
 #endif
 
-			MLK_ROUNDUP(_text, _etext),
-			MLK_ROUNDUP(__init_begin, __init_end),
-			MLK_ROUNDUP(_sdata, _edata),
-			MLK_ROUNDUP(__bss_start, __bss_stop));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
+		   MLK_ROUNDUP(_text, _etext),
+		   MLK_ROUNDUP(__init_begin, __init_end),
+		   MLK_ROUNDUP(_sdata, _edata),
+		   MLK_ROUNDUP(__bss_start, __bss_stop));
 
 	/*
 	 * Check boundaries twice: Some fundamental inconsistencies can
@@ -658,7 +695,7 @@ void __init mem_init(void)
 	 */
 #ifdef CONFIG_MMU
 	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
-	BUG_ON(TASK_SIZE 				> MODULES_VADDR);
+	BUG_ON(TASK_SIZE				> MODULES_VADDR);
 #endif
 
 #ifdef CONFIG_HIGHMEM
@@ -677,6 +714,9 @@ void __init mem_init(void)
 	}
 }
 
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
 void free_initmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6f9d28b..ba825b0 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -87,6 +87,7 @@ extern unsigned long arm_dma_pfn_limit;
 #define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
 #endif
 
+#define MAX_HOLE_ADDRESS    (PHYS_OFFSET + 0x10000000)
 extern phys_addr_t arm_lowmem_limit;
 
 void __init bootmem_init(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 5450b43..55bd742 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1006,6 +1006,19 @@ void __init sanity_check_meminfo(void)
 	int i, j, highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
+#ifdef CONFIG_ARCH_TRACKS_VMALLOC
+	unsigned long hole_start;
+	for (i = 0; i < (meminfo.nr_banks - 1); i++) {
+		hole_start = meminfo.bank[i].start + meminfo.bank[i].size;
+		if (hole_start != meminfo.bank[i+1].start) {
+			if (hole_start <= MAX_HOLE_ADDRESS) {
+				vmalloc_min = (void *) (vmalloc_min +
+				(meminfo.bank[i+1].start - hole_start));
+			}
+		}
+	}
+#endif
+
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		phys_addr_t size_limit;
@@ -1304,6 +1317,21 @@ static void __init kmap_init(void)
 #endif
 }
 
+static void __init reserve_virtual_lowmem(phys_addr_t start, phys_addr_t end)
+{
+#ifdef CONFIG_ARCH_TRACKS_VMALLOC
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)__phys_to_virt(start);
+	vm->size = end - start;
+	vm->flags = VM_LOWMEM;
+	vm->caller = reserve_virtual_lowmem;
+	vm_area_add_early(vm);
+	mark_vmalloc_reserved_area(vm->addr, vm->size);
+#endif
+}
+
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
@@ -1325,6 +1353,7 @@ static void __init map_lowmem(void)
 		map.type = MT_MEMORY;
 
 		create_mapping(&map);
+		reserve_virtual_lowmem(start, end);
 	}
 }
 
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation

* [RFC PATCHv3 10/11] arm: Use for_each_potential_vmalloc_area
       [not found] <1388699609-18214-1-git-send-email-lauraa@codeaurora.org>
                   ` (2 preceding siblings ...)
  2014-01-02 21:53 ` [RFC PATCHv3 09/11] arm: mm: Track lowmem in vmalloc Laura Abbott
@ 2014-01-02 21:53 ` Laura Abbott
  3 siblings, 0 replies; 5+ messages in thread
From: Laura Abbott @ 2014-01-02 21:53 UTC (permalink / raw)
  To: linux-arm-kernel

With CONFIG_ARCH_TRACKS_VMALLOC it is no longer the case that all
vmalloc space is contained between VMALLOC_START and VMALLOC_END.
Some code still needs to operate on every region that may hold
vmalloc mappings, however. Use for_each_potential_vmalloc_area where
appropriate to walk all of those regions.
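
The iterator itself is introduced earlier in the series (not shown in
this message); for illustration, its expected shape is:

	unsigned long vstart, vend;
	int i;

	for_each_potential_vmalloc_area(&vstart, &vend, &i)
		pr_debug("vmalloc candidate: 0x%08lx - 0x%08lx\n",
			 vstart, vend);

where each iteration yields one virtual range [vstart, vend) that may
contain vmalloc mappings: the VMALLOC_START..VMALLOC_END window plus
any lowmem holes tracked by the previous patch.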

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
---
 arch/arm/kvm/mmu.c    |   12 ++++++++----
 arch/arm/mm/ioremap.c |   12 ++++++++----
 arch/arm/mm/mmu.c     |    9 +++++++--
 3 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 58090698..4d2ca7e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -225,16 +225,20 @@ void free_boot_hyp_pgd(void)
 void free_hyp_pgds(void)
 {
 	unsigned long addr;
+	int i;
+	unsigned long vstart, vend;
 
 	free_boot_hyp_pgd();
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	if (hyp_pgd) {
-		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
-		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+		for_each_potential_nonvmalloc_area(&vstart, &vend, &i)
+			for (addr = vstart; addr < vend; addr += PGDIR_SIZE)
+				unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+		for_each_potential_vmalloc_area(&vstart, &vend, &i)
+			for (addr = vstart; addr < vend; addr += PGDIR_SIZE)
+				unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
 		kfree(hyp_pgd);
 		hyp_pgd = NULL;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ad92d4f..892bc82 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -115,13 +115,17 @@ EXPORT_SYMBOL(ioremap_page);
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	unsigned int seq;
+	int i;
+	unsigned long vstart, vend;
 
 	do {
 		seq = init_mm.context.vmalloc_seq;
-		memcpy(pgd_offset(mm, VMALLOC_START),
-		       pgd_offset_k(VMALLOC_START),
-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-					pgd_index(VMALLOC_START)));
+
+		for_each_potential_vmalloc_area(&vstart, &vend, &i)
+			memcpy(pgd_offset(mm, vstart),
+			       pgd_offset_k(vstart),
+			       sizeof(pgd_t) * (pgd_index(vend) -
+						pgd_index(vstart)));
 		mm->context.vmalloc_seq = seq;
 	} while (seq != init_mm.context.vmalloc_seq);
 }
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 55bd742..af8e43c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1217,6 +1217,8 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 	struct map_desc map;
 	unsigned long addr;
 	void *vectors;
+	unsigned long vstart, vend;
+	int i;
 
 	/*
 	 * Allocate the vector page early.
@@ -1225,8 +1227,11 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
-		pmd_clear(pmd_off_k(addr));
+
+	for_each_potential_vmalloc_area(&vstart, &vend, &i) {
+		for (addr = vstart; addr < vend; addr += PMD_SIZE)
+			pmd_clear(pmd_off_k(addr));
+	}
 
 	/*
 	 * Map the kernel if it is XIP.
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation

* [RFC PATCHv3 06/11] arm: use is_vmalloc_addr
  2014-01-02 21:53 ` [RFC PATCHv3 06/11] arm: use is_vmalloc_addr Laura Abbott
@ 2014-01-02 22:13   ` Dave Hansen
  0 siblings, 0 replies; 5+ messages in thread
From: Dave Hansen @ 2014-01-02 22:13 UTC (permalink / raw)
  To: linux-arm-kernel

On 01/02/2014 01:53 PM, Laura Abbott wrote:
> is_vmalloc_addr already does the range checking against VMALLOC_START and
> VMALLOC_END. Use it.

FWIW, these first 6 look completely sane and should get merged
regardless of what gets done with the rest.
