From mboxrd@z Thu Jan 1 00:00:00 1970
From: Changli Gao <xiaosuo@gmail.com>
Subject: [RFC] mm: generic adaptive large memory allocation APIs
Date: Thu, 6 May 2010 08:30:38 +0800
Message-ID: <1273105838-4441-1-git-send-email-xiaosuo@gmail.com>
Cc: Eric Dumazet, Jiri Slaby, Changli Gao, Alexander Viro,
    "Paul E. McKenney", Alexey Dobriyan, Ingo Molnar, Peter Zijlstra,
    linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
    Avi Kivity, Tetsuo Handa
To: akpm@linux-foundation.org

kvmalloc() tries to allocate physically contiguous memory first, and
falls back to vmalloc() for virtually contiguous memory when the former
allocation fails. kvfree() is used to free the memory allocated by
kvmalloc(); it can't be used in atomic context. Callers in atomic
context can use kvfree_inatomic() instead.

There is a lot of duplicated code in the kernel doing this, so I
introduce the above APIs. Thanks to Eric Dumazet for the "kv"
prefix. :)

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>

/*
 * Allocate @size bytes: kmalloc() for sub-page sizes, then physically
 * contiguous pages, then fall back to vmalloc().
 */
void *kvmalloc(size_t size)
{
	void *ptr;

	if (size < PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	ptr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN);
	if (ptr != NULL)
		return ptr;

	return vmalloc(size);
}
EXPORT_SYMBOL(kvmalloc);

/*
 * Free memory allocated by kvmalloc(). vfree() may sleep, so this must
 * not be called from atomic context.
 */
void kvfree(void *ptr, size_t size)
{
	if (size < PAGE_SIZE)
		kfree(ptr);
	else if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		free_pages_exact(ptr, size);
}
EXPORT_SYMBOL(kvfree);

/* Per-CPU list of vmalloc'ed blocks queued for deferred freeing. */
struct kvfree_work_struct {
	struct work_struct	work;
	void			*head;
	void			**ptail;
};

static DEFINE_PER_CPU(struct kvfree_work_struct, kvfree_work_struct);

static void kvfree_work(struct work_struct *_work)
{
	struct kvfree_work_struct *work;
	void *head, *tmp;

	work = container_of(_work, struct kvfree_work_struct, work);
	/*
	 * kvfree_inatomic() may link new entries from hard irq context,
	 * so detach the list with irqs disabled.
	 */
	local_irq_disable();
	head = work->head;
	work->head = NULL;
	work->ptail = &work->head;
	local_irq_enable();

	while (head) {
		tmp = head;
		head = *(void **)head;
		vfree(tmp);
	}
}

/*
 * Like kvfree(), but usable in atomic context: vmalloc'ed blocks are
 * queued on a per-CPU list and freed later in process context.
 */
void kvfree_inatomic(void *ptr, size_t size)
{
	if (size < PAGE_SIZE) {
		kfree(ptr);
	} else if (is_vmalloc_addr(ptr)) {
		struct kvfree_work_struct *work;

		/* Reuse the first word of the block as the 'next' link. */
		*(void **)ptr = NULL;
		local_irq_disable();
		work = this_cpu_ptr(&kvfree_work_struct);
		*(work->ptail) = ptr;
		work->ptail = (void **)ptr;
		schedule_work(&work->work);
		local_irq_enable();
	} else {
		free_pages_exact(ptr, size);
	}
}
EXPORT_SYMBOL(kvfree_inatomic);

static int kvfree_work_struct_init(void)
{
	int cpu;
	struct kvfree_work_struct *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&kvfree_work_struct, cpu);
		INIT_WORK(&work->work, kvfree_work);
		work->head = NULL;
		work->ptail = &work->head;
	}

	return 0;
}
//pure_initcall(kvfree_work_struct_init);

//--------------------
// for testing
static int test_init(void)
{
	int size;
	void *ptr;

	kvfree_work_struct_init();
	for (size = 1; size < (1 << 30); size <<= 1) {
		ptr = kvmalloc(size);
		if (is_vmalloc_addr(ptr)) {
			printk("%d\n", size);
			break;
		}
		kvfree(ptr, size);
	}

	return 0;
}
module_init(test_init);

static void test_exit(void)
{
	int cpu;
	struct kvfree_work_struct *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&kvfree_work_struct, cpu);
		cancel_work_sync(&work->work);
	}
}
module_exit(test_exit);

MODULE_LICENSE("GPL");
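
[Editor's usage sketch, not part of the patch: the struct and function
names below (big_table, big_table_alloc, big_table_free_rcu) are
hypothetical and only illustrate the intended pattern of the proposed
APIs: allocate with kvmalloc() in process context, free with kvfree()
where sleeping is allowed, and use kvfree_inatomic() from an RCU
callback, which runs in atomic (softirq) context. It assumes
<linux/rcupdate.h> and <linux/slab.h> in addition to the code above.]

struct big_table {
	struct rcu_head	rcu;
	size_t		size;	/* bytes passed to kvmalloc() */
	void		*data;
};

/* Process context: may sleep, so kvmalloc()/kvfree() are fine here. */
static struct big_table *big_table_alloc(size_t size)
{
	struct big_table *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;
	t->data = kvmalloc(size);
	if (!t->data) {
		kfree(t);
		return NULL;
	}
	t->size = size;

	return t;
}

/*
 * RCU callback: runs in softirq context, where vfree() must not be
 * called directly; kvfree_inatomic() defers it to a workqueue instead.
 */
static void big_table_free_rcu(struct rcu_head *rcu)
{
	struct big_table *t = container_of(rcu, struct big_table, rcu);

	kvfree_inatomic(t->data, t->size);
	kfree(t);
}

[The deferred free in big_table_free_rcu() is the case the per-CPU
worklist exists for: the callback cannot sleep, so vmalloc'ed memory is
handed to kvfree_work() and released later in process context.]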