* [RFC PATCHv2 0/4] Intermix Lowmem and Vmalloc
@ 2013-11-12 22:27 Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 1/4] arm: mm: Add iotable_init_novmreserve Laura Abbott
` (3 more replies)
0 siblings, 4 replies; 5+ messages in thread
From: Laura Abbott @ 2013-11-12 22:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-mm; +Cc: Kyungmin Park, Russell King
Hi,
This is v2 of a patch series to allow lowmem and vmalloc to be
intermixed (http://lists.infradead.org/pipermail/linux-arm-kernel/2013-November/210578.html
provides the full descriptions, which I've omitted here for brevity).
Notes I forgot to mention before:
- as a side effect of how CMA is set up, the remapped lowmem regions are
explicitly printed in /proc/vmallocinfo, which has turned out to be useful
for debugging (see the illustrative excerpt after these notes). I couldn't
find a good way to support both those and the lowmem regions in vmalloc
short of adding partial region unmapping.
- I still have a nagging concern about regions that straddle the
lowmem/highmem boundary causing problems. I haven't been able to find a
concrete issue yet, though.
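For reference, here is roughly what the extra entries look like. This
excerpt is illustrative only -- the addresses, sizes and symbol names
below are made up, and the 'lowmem' tag only appears once patch 3 is
applied:

    $ cat /proc/vmallocinfo
    0xc0000000-0xc8000000 134217728 map_lowmem+0x0/0x0 lowmem
    0xc9000000-0xc9002000      8192 iotable_init+0x0/0xb8 ioremap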
v2: Addressed several comments from Kyungmin Park, which led me to
discover several issues with the is_vmalloc_addr implementation.
is_vmalloc_addr is probably the ugliest part of the entire series, and I
debated whether adding extra vmalloc flags would make it less ugly.
Again, comments or suggestions on how to better accomplish this are welcome.
Thanks,
Laura
* [RFC PATCHv2 1/4] arm: mm: Add iotable_init_novmreserve
2013-11-12 22:27 [RFC PATCHv2 0/4] Intermix Lowmem and Vmalloc Laura Abbott
@ 2013-11-12 22:27 ` Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 2/4] arm: mm: Track lowmem in vmalloc Laura Abbott
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Laura Abbott @ 2013-11-12 22:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-mm; +Cc: Kyungmin Park, Russell King, Laura Abbott
iotable_init is currently used by dma_contiguous_remap to remap
CMA memory appropriately. This has the side effect of reserving
the CMA area in the vmalloc tracking structures. This is fine
under normal circumstances, but it creates conflicts if we want
to track lowmem in vmalloc. Since dma_contiguous_remap is only
really concerned with the remapping, introduce iotable_init_novmreserve
to allow remapping of pages without reserving the virtual address
range in vmalloc space.
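As an illustration, a caller that wants the static mapping but not the
vmalloc reservation would do something like this (the physical base and
size here are made up for the example):

    struct map_desc map;

    map.pfn     = __phys_to_pfn(0x90000000);    /* assumed region base */
    map.virtual = (unsigned long)__va(0x90000000);
    map.length  = SZ_16M;
    map.type    = MT_MEMORY_DMA_READY;

    /* Remap only; the virtual range stays available to vmalloc. */
    iotable_init_novmreserve(&map, 1);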
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
---
arch/arm/include/asm/mach/map.h | 2 ++
arch/arm/mm/dma-mapping.c | 2 +-
arch/arm/mm/ioremap.c | 5 +++--
arch/arm/mm/mm.h | 2 +-
arch/arm/mm/mmu.c | 17 ++++++++++++++---
5 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..02e3509 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -37,6 +37,7 @@ struct map_desc {
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
+extern void iotable_init_novmreserve(struct map_desc *, int);
extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
void *caller);
@@ -56,6 +57,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype);
#else
#define iotable_init(map,num) do { } while (0)
+#define iotable_init_novmreserve(map,num) do { } while (0)
#define vm_reserve_area_early(a,s,c) do { } while (0)
#endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7f9b179..bf80e43 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -435,7 +435,7 @@ void __init dma_contiguous_remap(void)
addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
- iotable_init(&map, 1);
+ iotable_init_novmreserve(&map, 1);
}
}
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index f123d6e..ad92d4f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -84,14 +84,15 @@ struct static_vm *find_static_vm_vaddr(void *vaddr)
return NULL;
}
-void __init add_static_vm_early(struct static_vm *svm)
+void __init add_static_vm_early(struct static_vm *svm, bool add_to_vm)
{
struct static_vm *curr_svm;
struct vm_struct *vm;
void *vaddr;
vm = &svm->vm;
- vm_area_add_early(vm);
+ if (add_to_vm)
+ vm_area_add_early(vm);
vaddr = vm->addr;
list_for_each_entry(curr_svm, &static_vmlist, list) {
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9a..27a3680 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -75,7 +75,7 @@ struct static_vm {
extern struct list_head static_vmlist;
extern struct static_vm *find_static_vm_vaddr(void *vaddr);
-extern __init void add_static_vm_early(struct static_vm *svm);
+extern __init void add_static_vm_early(struct static_vm *svm, bool add_to_vm);
#endif
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 53cdbd3..b83ed88 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -817,7 +817,8 @@ static void __init create_mapping(struct map_desc *md)
/*
* Create the architecture specific mappings
*/
-void __init iotable_init(struct map_desc *io_desc, int nr)
+static void __init __iotable_init(struct map_desc *io_desc, int nr,
+ bool add_to_vm)
{
struct map_desc *md;
struct vm_struct *vm;
@@ -838,10 +839,20 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
vm->flags |= VM_ARM_MTYPE(md->type);
vm->caller = iotable_init;
- add_static_vm_early(svm++);
+ add_static_vm_early(svm++, add_to_vm);
}
}
+void __init iotable_init(struct map_desc *io_desc, int nr)
+{
+ return __iotable_init(io_desc, nr, true);
+}
+
+void __init iotable_init_novmreserve(struct map_desc *io_desc, int nr)
+{
+ return __iotable_init(io_desc, nr, false);
+}
+
void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
void *caller)
{
@@ -855,7 +866,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
vm->size = size;
vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = caller;
- add_static_vm_early(svm);
+ add_static_vm_early(svm, true);
}
#ifndef CONFIG_ARM_LPAE
--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation
* [RFC PATCHv2 2/4] arm: mm: Track lowmem in vmalloc
2013-11-12 22:27 [RFC PATCHv2 0/4] Intermix Lowmem and Vmalloc Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 1/4] arm: mm: Add iotable_init_novmreserve Laura Abbott
@ 2013-11-12 22:27 ` Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 3/4] mm/vmalloc.c: Allow lowmem to be tracked " Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 4/4] mm/vmalloc.c: Treat the entire kernel virtual space as vmalloc Laura Abbott
3 siblings, 0 replies; 5+ messages in thread
From: Laura Abbott @ 2013-11-12 22:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-mm
Cc: Kyungmin Park, Russell King, Laura Abbott, Neeti Desai
Rather than always keeping lowmem and vmalloc separate, we can
now allow the two to be mixed. This means that all lowmem areas
need to be explicitly tracked in vmalloc to avoid over-allocating.
Additionally, adjust the vmalloc reserve to account for the fact
that there may be a hole in the middle of lowmem that is handed
over to vmalloc.
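As a worked example of the vmalloc_min adjustment below (the bank layout
is hypothetical), consider two lowmem banks with a hole between them:

    bank 0: 0x80000000 - 0x88000000  (128 MB lowmem)
    hole  : 0x88000000 - 0x8c000000  ( 64 MB, below MAX_HOLE_ADDRESS)
    bank 1: 0x8c000000 - 0x94000000  (128 MB lowmem)

The new loop in sanity_check_meminfo() moves vmalloc_min up by the 64 MB
hole, so the same amount of physical memory still fits below the
lowmem/vmalloc boundary; the hole's virtual range is later handed back
to vmalloc because the banks are tracked individually.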
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
---
arch/arm/Kconfig | 3 +
arch/arm/mm/init.c | 104 ++++++++++++++++++++++++++++++++++++----------------
arch/arm/mm/mm.h | 1 +
arch/arm/mm/mmu.c | 23 +++++++++++
4 files changed, 99 insertions(+), 32 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 051fce4..1f36664 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -270,6 +270,9 @@ config GENERIC_BUG
def_bool y
depends on BUG
+config ARCH_TRACKS_VMALLOC
+ bool
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 15225d8..c9ca316 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -576,6 +576,46 @@ static void __init free_highpages(void)
#endif
}
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+void print_vmalloc_lowmem_info(void)
+{
+ int i;
+ void *va_start, *va_end;
+
+ printk(KERN_NOTICE
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM(VMALLOC_START, VMALLOC_END));
+
+ for (i = meminfo.nr_banks - 1; i >= 0; i--) {
+ if (!meminfo.bank[i].highmem) {
+ va_start = __va(meminfo.bank[i].start);
+ va_end = __va(meminfo.bank[i].start +
+ meminfo.bank[i].size);
+ printk(KERN_NOTICE
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)va_start, (unsigned long)va_end));
+ }
+ if (i && ((meminfo.bank[i-1].start + meminfo.bank[i-1].size) !=
+ meminfo.bank[i].start)) {
+ if (meminfo.bank[i-1].start + meminfo.bank[i-1].size
+ <= MAX_HOLE_ADDRESS) {
+ va_start = __va(meminfo.bank[i-1].start
+ + meminfo.bank[i-1].size);
+ va_end = __va(meminfo.bank[i].start);
+ printk(KERN_NOTICE
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)va_start,
+ (unsigned long)va_end));
+ }
+ }
+ }
+}
+#endif
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
@@ -604,55 +644,52 @@ void __init mem_init(void)
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
printk(KERN_NOTICE "Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
" ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
- " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
-
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
- MLK(FIXADDR_START, FIXADDR_TOP),
- MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLK(FIXADDR_START, FIXADDR_TOP));
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ print_vmalloc_lowmem_info();
+#else
+ printk(KERN_NOTICE
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM(VMALLOC_START, VMALLOC_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory));
+#endif
+ printk(KERN_NOTICE
#ifdef CONFIG_HIGHMEM
- MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+ " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
+#ifdef CONFIG_HIGHMEM
+ MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
- MLM(MODULES_VADDR, MODULES_END),
+ MLM(MODULES_VADDR, MODULES_END),
#endif
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(__bss_start, __bss_stop));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
+ MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_sdata, _edata),
+ MLK_ROUNDUP(__bss_start, __bss_stop));
/*
* Check boundaries twice: Some fundamental inconsistencies can
@@ -660,7 +697,7 @@ void __init mem_init(void)
*/
#ifdef CONFIG_MMU
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
- BUG_ON(TASK_SIZE > MODULES_VADDR);
+ BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif
#ifdef CONFIG_HIGHMEM
@@ -679,6 +716,9 @@ void __init mem_init(void)
}
}
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27a3680..f484e52 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -85,6 +85,7 @@ extern phys_addr_t arm_dma_limit;
#define arm_dma_limit ((phys_addr_t)~0)
#endif
+#define MAX_HOLE_ADDRESS (PHYS_OFFSET + 0x10000000)
extern phys_addr_t arm_lowmem_limit;
void __init bootmem_init(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b83ed88..ed2a4fa 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1004,6 +1004,19 @@ void __init sanity_check_meminfo(void)
int i, j, highmem = 0;
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+#ifdef CONFIG_ARCH_TRACKS_VMALLOC
+ unsigned long hole_start;
+ for (i = 0; i < (meminfo.nr_banks - 1); i++) {
+ hole_start = meminfo.bank[i].start + meminfo.bank[i].size;
+ if (hole_start != meminfo.bank[i+1].start) {
+ if (hole_start <= MAX_HOLE_ADDRESS) {
+ vmalloc_min = (void *) (vmalloc_min +
+ (meminfo.bank[i+1].start - hole_start));
+ }
+ }
+ }
+#endif
+
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
phys_addr_t size_limit;
@@ -1311,6 +1324,7 @@ static void __init map_lowmem(void)
phys_addr_t start = reg->base;
phys_addr_t end = start + reg->size;
struct map_desc map;
+ struct vm_struct *vm;
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
@@ -1323,6 +1337,15 @@ static void __init map_lowmem(void)
map.type = MT_MEMORY;
create_mapping(&map);
+
+#ifdef CONFIG_ARCH_TRACKS_VMALLOC
+ vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+ vm->addr = (void *)map.virtual;
+ vm->size = end - start;
+ vm->flags = VM_LOWMEM;
+ vm->caller = map_lowmem;
+ vm_area_add_early(vm);
+#endif
}
}
--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation
* [RFC PATCHv2 3/4] mm/vmalloc.c: Allow lowmem to be tracked in vmalloc
2013-11-12 22:27 [RFC PATCHv2 0/4] Intermix Lowmem and Vmalloc Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 1/4] arm: mm: Add iotable_init_novmreserve Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 2/4] arm: mm: Track lowmem in vmalloc Laura Abbott
@ 2013-11-12 22:27 ` Laura Abbott
2013-11-12 22:27 ` [RFC PATCHv2 4/4] mm/vmalloc.c: Treat the entire kernel virtual space as vmalloc Laura Abbott
3 siblings, 0 replies; 5+ messages in thread
From: Laura Abbott @ 2013-11-12 22:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-mm
Cc: Kyungmin Park, Russell King, Laura Abbott, Neeti Desai
vmalloc is currently assumed to be a completely separate address space
from the lowmem region. While this may be true in the general case,
there are some instances where intermixing lowmem and vmalloc space
provides gains. One example is needing to steal a large chunk of physical
lowmem for another purpose outside the system's usage. Rather than
waste the precious lowmem space on a 32-bit system, we can allow the
virtual holes created by the physical holes to be used by vmalloc
for virtual addressing. Track lowmem allocations in vmalloc to
allow mixing of lowmem and vmalloc.
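To make the requirement concrete, here is a sketch (not part of the
patch) of the behaviour the reworked is_vmalloc_addr() below has to
preserve:

    void *lm = kmalloc(64, GFP_KERNEL);   /* direct-mapped lowmem   */
    void *vm = vmalloc(SZ_64K);           /* genuine vmalloc memory */

    /*
     * With lowmem intermixed into [VMALLOC_START, VMALLOC_END), the
     * old address-range test could return true for 'lm'. Callers that
     * choose between virt_to_page() and vmalloc_to_page() based on
     * this check would then use the wrong translation, so the new
     * implementation walks the vmap areas and skips VM_LOWMEM ones.
     */
    BUG_ON(is_vmalloc_addr(lm));          /* must remain false */
    BUG_ON(!is_vmalloc_addr(vm));         /* must remain true  */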
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
---
include/linux/mm.h | 6 ++++++
include/linux/vmalloc.h | 1 +
mm/Kconfig | 11 +++++++++++
mm/vmalloc.c | 35 +++++++++++++++++++++++++++++++++++
4 files changed, 53 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f022460..f2da420 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -308,6 +308,10 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern int is_vmalloc_addr(const void *x);
+#else
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
@@ -318,6 +322,8 @@ static inline int is_vmalloc_addr(const void *x)
return 0;
#endif
}
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4b8a891..e0c8c49 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -16,6 +16,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
+#define VM_LOWMEM 0x00000040 /* Tracking of direct mapped lowmem */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
diff --git a/mm/Kconfig b/mm/Kconfig
index 8028dcc..b3c459d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -519,3 +519,14 @@ config MEM_SOFT_DIRTY
it can be cleared by hands.
See Documentation/vm/soft-dirty.txt for more details.
+
+config ENABLE_VMALLOC_SAVING
+ bool "Intermix lowmem and vmalloc virtual space"
+ depends on ARCH_TRACKS_VMALLOC
+ help
+ Some memory layouts on embedded systems steal large amounts
+ of lowmem physical memory for purposes outside of the kernel.
+ Rather than waste the physical and virtual space, allow the
+ kernel to use the virtual space as vmalloc space.
+
+ If unsure, say N.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13a5495..2ec9ac7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -282,6 +282,38 @@ static unsigned long cached_align;
static unsigned long vmap_area_pcpu_hole;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+int is_vmalloc_addr(const void *x)
+{
+ struct vmap_area *va;
+ int ret = 0;
+
+ spin_lock(&vmap_area_lock);
+ list_for_each_entry(va, &vmap_area_list, list) {
+ if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+ continue;
+
+ if (!(va->flags & VM_VM_AREA))
+ continue;
+
+ if (va->vm == NULL)
+ continue;
+
+ if (va->vm->flags & VM_LOWMEM)
+ continue;
+
+ if ((unsigned long)x >= va->va_start &&
+ (unsigned long)x < va->va_end) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock(&vmap_area_lock);
+ return ret;
+}
+EXPORT_SYMBOL(is_vmalloc_addr);
+#endif
+
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
@@ -2628,6 +2660,9 @@ static int s_show(struct seq_file *m, void *p)
if (v->flags & VM_VPAGES)
seq_printf(m, " vpages");
+ if (v->flags & VM_LOWMEM)
+ seq_printf(m, " lowmem");
+
show_numa_info(m, v);
seq_putc(m, '\n');
return 0;
--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation
* [RFC PATCHv2 4/4] mm/vmalloc.c: Treat the entire kernel virtual space as vmalloc
2013-11-12 22:27 [RFC PATCHv2 0/4] Intermix Lowmem and Vmalloc Laura Abbott
` (2 preceding siblings ...)
2013-11-12 22:27 ` [RFC PATCHv2 3/4] mm/vmalloc.c: Allow lowmem to be tracked " Laura Abbott
@ 2013-11-12 22:27 ` Laura Abbott
3 siblings, 0 replies; 5+ messages in thread
From: Laura Abbott @ 2013-11-12 22:27 UTC (permalink / raw)
To: linux-arm-kernel, linux-mm
Cc: Kyungmin Park, Russell King, Laura Abbott, Neeti Desai
With CONFIG_ENABLE_VMALLOC_SAVING, all lowmem is tracked in
vmalloc. This means that the entire kernel virtual address space
can be treated as part of the vmalloc region. Allow vm areas
to be allocated from the full kernel address range.
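The net effect is sketched below (this is a restatement, not the literal
patch): the search window for new vm areas widens from the vmalloc region
to the whole kernel space. The lowmem vm_structs registered in patch 2
act as already-allocated placeholders, so allocations can only land in
genuine holes:

    /* Effective search window for the __get_vm_area_node() calls: */
    #ifdef CONFIG_ENABLE_VMALLOC_SAVING
        start = PAGE_OFFSET;    /* lowmem holes become usable */
    #else
        start = VMALLOC_START;  /* classic vmalloc-only window */
    #endif
        end = VMALLOC_END;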
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
---
mm/vmalloc.c | 11 +++++++++++
1 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2ec9ac7..31644b6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1394,16 +1394,27 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL,
+ __builtin_return_address(0));
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
+#endif
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL, caller);
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL, caller);
+#endif
}
/**
--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation