linux-fsdevel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/9] mm: add generic adaptive large memory allocation APIs
@ 2010-05-13  9:51 Changli Gao
  2010-05-13 13:20 ` Peter Zijlstra
  2010-05-13 14:39 ` Milton Miller
  0 siblings, 2 replies; 9+ messages in thread
From: Changli Gao @ 2010-05-13  9:51 UTC (permalink / raw)
  To: akpm
  Cc: Hoang-Nam Nguyen, Christoph Raisch, Roland Dreier, Sean Hefty,
	Hal Rosenstock, Divy Le Ray, James E.J. Bottomley,
	Theodore Ts'o, Andreas Dilger, Alexander Viro, Paul Menage,
	Li Zefan, linux-rdma, linux-kernel, netdev, linux-scsi,
	linux-ext4, linux-fsdevel, linux-mm, containers, Eric Dumazet,
	Tetsuo Handa, Peter Zijlstra, Changli Gao

generic adaptive large memory allocation APIs

The kv*alloc APIs allocate large contiguous memory for callers that do not
mind whether the memory is physically or virtually contiguous. The allocator
always tries its best to allocate physically contiguous memory first.

In this patch set, some APIs are introduced: kvmalloc(), kvzalloc(), kvcalloc(),
kvrealloc(), kvfree() and kvfree_inatomic().

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
---
 include/linux/mm.h      |   31 ++++++++++++++
 include/linux/vmalloc.h |    1 
 mm/nommu.c              |    6 ++
 mm/util.c               |  104 ++++++++++++++++++++++++++++++++++++++++++++++++
 mm/vmalloc.c            |   14 ++++++
 5 files changed, 156 insertions(+)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 462acaf..0ece978 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1467,5 +1467,36 @@ extern int soft_offline_page(struct page *page, int flags);
 
 extern void dump_page(struct page *page);
 
+void *__kvmalloc(size_t size, gfp_t flags);
+
+/*
+ * kvmalloc - allocate @size bytes, physically contiguous when possible,
+ * falling back to vmalloc() for large requests.  Pair with kvfree().
+ * Uses GFP_KERNEL internally, so it may sleep.
+ */
+static inline void *kvmalloc(size_t size)
+{
+	return __kvmalloc(size, 0);
+}
+
+/* kvzalloc - like kvmalloc(), but the returned memory is zeroed. */
+static inline void *kvzalloc(size_t size)
+{
+	return __kvmalloc(size, __GFP_ZERO);
+}
+
+/*
+ * kvcalloc - allocate a zeroed array of @n elements of @size bytes each.
+ *
+ * The multiplication is overflow-checked (like calloc): on wrap-around
+ * the allocation fails with NULL instead of silently returning a buffer
+ * smaller than the caller asked for.
+ */
+static inline void *kvcalloc(size_t n, size_t size)
+{
+	if (size != 0 && n > (size_t)-1 / size)
+		return NULL;
+	return __kvmalloc(n * size, __GFP_ZERO);
+}
+
+void __kvfree(void *ptr, bool inatomic);
+
+/*
+ * kvfree - free memory obtained from kv*alloc().
+ * May end up in vfree(), which can sleep; do not call from atomic context
+ * (use kvfree_inatomic() there instead).
+ */
+static inline void kvfree(void *ptr)
+{
+	__kvfree(ptr, false);
+}
+
+/*
+ * kvfree_inatomic - atomic-context-safe kvfree(); vmalloc'ed buffers are
+ * released later from a work item rather than by calling vfree() directly.
+ */
+static inline void kvfree_inatomic(void *ptr)
+{
+	__kvfree(ptr, true);
+}
+
+void *kvrealloc(void *ptr, size_t newsize);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 227c2a5..33ec828 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -60,6 +60,7 @@ extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot);
 extern void vfree(const void *addr);
+extern unsigned long vsize(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
diff --git a/mm/nommu.c b/mm/nommu.c
index 63fa17d..1ddf3fe 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -223,6 +223,12 @@ void vfree(const void *addr)
 }
 EXPORT_SYMBOL(vfree);
 
+/*
+ * On nommu kernels vmalloc() is backed by kmalloc() (see __vmalloc() below),
+ * so ksize() reports the usable size of a "vmalloc" allocation.
+ */
+unsigned long vsize(const void *addr)
+{
+	return ksize(addr);
+}
+EXPORT_SYMBOL(vsize);
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	/*
diff --git a/mm/util.c b/mm/util.c
index f5712e8..7cc364a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -5,6 +5,7 @@
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <asm/uaccess.h>
+#include <linux/vmalloc.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
@@ -289,6 +290,109 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
+/*
+ * __kvmalloc - allocate @size bytes, preferring physically contiguous memory.
+ * @size:  number of bytes requested
+ * @flags: extra gfp bits ORed into GFP_KERNEL (callers pass 0 or __GFP_ZERO)
+ *
+ * Small requests go straight to kmalloc().  Larger requests are
+ * page-aligned and tried from the page allocator first; the rounded size
+ * is stashed in the head page's ->private so __kvfree() can select the
+ * matching release function.  On failure we fall back to vmalloc(), which
+ * is only virtually contiguous.  GFP_KERNEL may sleep.
+ */
+void *__kvmalloc(size_t size, gfp_t flags)
+{
+	void *ptr;
+
+	if (size < PAGE_SIZE)
+		return kmalloc(size, GFP_KERNEL | flags);
+	size = PAGE_ALIGN(size);
+	if (is_power_of_2(size))
+		ptr = (void *)__get_free_pages(GFP_KERNEL | flags |
+					       __GFP_NOWARN, get_order(size));
+	else
+		ptr = alloc_pages_exact(size, GFP_KERNEL | flags |
+					      __GFP_NOWARN);
+	if (ptr != NULL) {
+		virt_to_head_page(ptr)->private = size;
+		return ptr;
+	}
+
+	/* @flags is not forwarded to vmalloc(), so honour __GFP_ZERO by hand. */
+	ptr = vmalloc(size);
+	if (ptr != NULL && (flags & __GFP_ZERO))
+		memset(ptr, 0, size);
+
+	return ptr;
+}
+EXPORT_SYMBOL(__kvmalloc);
+
+/* Deferred vfree(): the work_struct lives at the start of the dying buffer. */
+static void kvfree_work(struct work_struct *work)
+{
+	vfree(work);
+}
+
+/*
+ * __kvfree - release memory obtained from __kvmalloc().
+ * @ptr:      allocation to free (NULL / ZERO_SIZE_PTR are no-ops)
+ * @inatomic: true when the caller may not sleep
+ *
+ * vfree() can sleep, so in atomic context the vmalloc'ed buffer itself is
+ * reused to hold a work_struct (the memory is dead at this point; the
+ * BUILD_BUG_ON proves it is large enough) and the real vfree() runs later
+ * from the workqueue.  Page-allocator memory is freed with the function
+ * matching the allocation path, selected via the size that __kvmalloc()
+ * stored in the head page's ->private.
+ */
+void __kvfree(void *ptr, bool inatomic)
+{
+	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
+		return;
+	if (is_vmalloc_addr(ptr)) {
+		if (inatomic) {
+			struct work_struct *work;
+
+			work = ptr;
+			BUILD_BUG_ON(sizeof(struct work_struct) > PAGE_SIZE);
+			INIT_WORK(work, kvfree_work);
+			schedule_work(work);
+		} else {
+			vfree(ptr);
+		}
+	} else {
+		struct page *page;
+
+		page = virt_to_head_page(ptr);
+		if (PageSlab(page) || PageCompound(page))
+			kfree(ptr);	/* kmalloc()ed (slab or large compound) */
+		else if (is_power_of_2(page->private))
+			free_pages((unsigned long)ptr,
+				   get_order(page->private));
+		else
+			free_pages_exact(ptr, page->private);
+	}
+}
+EXPORT_SYMBOL(__kvfree);
+
+/*
+ * kvrealloc - resize an allocation made with kv*alloc().
+ * @ptr:     existing allocation (NULL / ZERO_SIZE_PTR behave like kvmalloc)
+ * @newsize: requested new size in bytes; 0 frees @ptr
+ *
+ * Returns the (possibly moved) allocation, or NULL on failure, in which
+ * case @ptr is left untouched and still valid.
+ */
+void *kvrealloc(void *ptr, size_t newsize)
+{
+	void *nptr;
+	size_t oldsize;
+
+	if (unlikely(!newsize)) {
+		kvfree(ptr);
+		return ZERO_SIZE_PTR;
+	}
+
+	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
+		return kvmalloc(newsize);
+
+	if (is_vmalloc_addr(ptr)) {
+		oldsize = vsize(ptr);
+		if (newsize <= oldsize)
+			return ptr;
+	} else {
+		struct page *page;
+
+		page = virt_to_head_page(ptr);
+		if (PageSlab(page) || PageCompound(page)) {
+			if (newsize < PAGE_SIZE)
+				return krealloc(ptr, newsize, GFP_KERNEL);
+			oldsize = ksize(ptr);
+		} else {
+			oldsize = page->private;
+			if (newsize <= oldsize)
+				return ptr;
+		}
+	}
+
+	nptr = kvmalloc(newsize);
+	if (nptr != NULL) {
+		/*
+		 * In the slab branch above, ksize() may exceed @newsize when
+		 * a >= PAGE_SIZE object is being shrunk; copying the full
+		 * oldsize would overrun the new buffer.  Copy no more than
+		 * the new buffer can hold.
+		 */
+		memcpy(nptr, ptr, min(oldsize, newsize));
+		kvfree(ptr);
+	}
+
+	return nptr;
+}
+EXPORT_SYMBOL(kvrealloc);
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ae00746..93552a8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1413,6 +1413,20 @@ void vfree(const void *addr)
 EXPORT_SYMBOL(vfree);
 
 /**
+ *	vsize  -  get the usable size of a vmalloc() allocation
+ *	@addr:		memory base address, as returned by vmalloc()
+ *
+ *	The trailing guard page of the vm area is not counted.  Returns 0
+ *	when @addr is not the base of a live vmalloc area, instead of
+ *	dereferencing the NULL returned by find_vmap_area() and oopsing.
+ */
+unsigned long vsize(const void *addr)
+{
+	struct vmap_area *va;
+
+	va = find_vmap_area((unsigned long)addr);
+	if (unlikely(va == NULL))
+		return 0;
+
+	return va->va_end - va->va_start - PAGE_SIZE;
+}
+EXPORT_SYMBOL(vsize);
+
+/**
  *	vunmap  -  release virtual mapping obtained by vmap()
  *	@addr:		memory base address
  *

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2010-05-17  1:34 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-05-13  9:51 [PATCH 1/9] mm: add generic adaptive large memory allocation APIs Changli Gao
2010-05-13 13:20 ` Peter Zijlstra
2010-05-13 13:36   ` [PATCH 1/9] mm: add generic adaptive large memory allocationAPIs Tetsuo Handa
2010-05-17  1:34     ` KOSAKI Motohiro
2010-05-13 14:08   ` [PATCH 1/9] mm: add generic adaptive large memory allocation APIs Changli Gao
2010-05-14  8:03     ` Peter Zijlstra
2010-05-14  8:12       ` Changli Gao
2010-05-13 14:39 ` Milton Miller
2010-05-13 14:49   ` Changli Gao

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).