public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Ravikiran G Thirumalai <kiran@scalex86.org>
To: Andrew Morton <akpm@osdl.org>
Cc: linux-kernel@vger.kernel.org, dipankar@in.ibm.com,
	bharata@in.ibm.com, shai@scalex86.org,
	Rusty Russell <rusty@rustcorp.com.au>
Subject: [patch 10/11] mm: Reimplementation of dynamic per-cpu allocator -- allow_early_mapvmarea
Date: Tue, 13 Sep 2005 09:18:06 -0700	[thread overview]
Message-ID: <20050913161806.GL3570@localhost.localdomain> (raw)
In-Reply-To: <20050913155112.GB3570@localhost.localdomain>

This patch provides for early calls to map_vm_area.  Currently, map_vm_area
cannot be called early during the boot process since map_vm_area depends on
kmalloc for the vm_struct objects.  This patch is a bad hack to let
map_vm_area work early, but just for a few calls so that the dynamic per-cpu
subsystem can allocate a block and satisfy some early requests.  This is
primarily to enable slab code to use alloc_percpu for slab head arrays.  This
patch might not be elegant, but solves the chicken and egg problem in using
alloc_percpu for slab.

Signed-off-by: Ravikiran Thirumalai <kiran.th@gmail.com>

Index: alloc_percpu-2.6.13-rc6/include/linux/slab.h
===================================================================
--- alloc_percpu-2.6.13-rc6.orig/include/linux/slab.h	2005-08-14 21:47:56.000000000 -0700
+++ alloc_percpu-2.6.13-rc6/include/linux/slab.h	2005-08-15 17:29:41.000000000 -0700
@@ -76,6 +76,8 @@
 extern struct cache_sizes malloc_sizes[];
 extern void *__kmalloc(size_t, unsigned int __nocast);
 
+#define SLAB_READY ({malloc_sizes[0].cs_cachep != NULL;})
+
 static inline void *kmalloc(size_t size, unsigned int __nocast flags)
 {
 	if (__builtin_constant_p(size)) {
Index: alloc_percpu-2.6.13-rc6/include/linux/vmalloc.h
===================================================================
--- alloc_percpu-2.6.13-rc6.orig/include/linux/vmalloc.h	2005-08-15 17:28:42.000000000 -0700
+++ alloc_percpu-2.6.13-rc6/include/linux/vmalloc.h	2005-08-15 17:29:41.000000000 -0700
@@ -8,6 +8,7 @@
 #define VM_IOREMAP	0x00000001	/* ioremap() and friends */
 #define VM_ALLOC	0x00000002	/* vmalloc() */
 #define VM_MAP		0x00000004	/* vmap()ed pages */
+#define VM_EARLY	0x00000008	/* indicates static vm_struct */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 struct vm_struct {
Index: alloc_percpu-2.6.13-rc6/mm/vmalloc.c
===================================================================
--- alloc_percpu-2.6.13-rc6.orig/mm/vmalloc.c	2005-08-15 17:28:42.000000000 -0700
+++ alloc_percpu-2.6.13-rc6/mm/vmalloc.c	2005-08-15 17:35:29.000000000 -0700
@@ -160,6 +160,15 @@
 
 #define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
 
+/* 
+ * Statically define a few vm_structs so that early per-cpu allocator code
+ * can get vm_areas even before slab is up. NR_EARLY_VMAREAS should remain
+ * in single digits
+ */
+#define NR_EARLY_VMAREAS (1)
+static struct vm_struct early_vmareas[NR_EARLY_VMAREAS];
+static int early_vmareas_idx = 0;
+
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end,
 				unsigned int gfp_flags)
@@ -168,6 +177,9 @@
 	unsigned long align = 1;
 	unsigned long addr;
 
+	if (unlikely(!size))
+		return NULL;
+
 	if (flags & VM_IOREMAP) {
 		int bit = fls(size);
 
@@ -184,10 +196,16 @@
 	area = kmalloc(sizeof(*area), gfp_flags);
 	if (unlikely(!area))
 		return NULL;
-
-	if (unlikely(!size)) {
-		kfree (area);
-		return NULL;
+ 	if (likely(SLAB_READY)) {
+ 		area = kmalloc(sizeof(*area), GFP_KERNEL);
+ 		if (unlikely(!area))
+ 			return NULL;
+ 	} else {
+ 		if (early_vmareas_idx < NR_EARLY_VMAREAS) {
+ 			area = &early_vmareas[early_vmareas_idx++];
+ 			flags |= VM_EARLY;
+ 		} else
+ 			return NULL;
 	}
 
 	/*
@@ -228,7 +246,8 @@
 
 out:
 	write_unlock(&vmlist_lock);
-	kfree(area);
+	if (likely(!(flags & VM_EARLY)))
+		kfree(area);
 	if (printk_ratelimit())
 		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
 	return NULL;
@@ -326,7 +345,8 @@
 			kfree(area->pages);
 	}
 
-	kfree(area);
+	if (likely(!(area->flags & VM_EARLY)))
+		kfree(area);
 	return;
 }
 
@@ -415,7 +435,8 @@
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
-		kfree(area);
+		if (likely(!(area->flags & VM_EARLY)))
+			kfree(area);
 		return NULL;
 	}
 	memset(area->pages, 0, array_size);

  parent reply	other threads:[~2005-09-13 16:18 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2005-09-13 15:51 [patch 0/11] mm: Reimplementation of dynamic per-cpu allocator Ravikiran G Thirumalai
2005-09-13 15:54 ` [patch 1/11] mm: Reimplementation of dynamic per-cpu allocator -- vmalloc_fixup Ravikiran G Thirumalai
2005-09-13 19:25   ` Andrew Morton
2005-09-13 15:56 ` [patch 2/11] mm: Reimplementation of dynamic per-cpu allocator -- alloc_percpu Ravikiran G Thirumalai
2005-09-13 15:58 ` [patch 3/11] mm: Reimplementation of dynamic per-cpu allocator -- alloc_percpu_atomic Ravikiran G Thirumalai
2005-09-13 15:59 ` [patch 4/11] mm: Reimplementation of dynamic per-cpu allocator -- change_alloc_percpu_users Ravikiran G Thirumalai
2005-09-13 16:01 ` [patch 5/11] mm: Bigrefs -- add_getcpuptr Ravikiran G Thirumalai
2005-09-13 16:04 ` [patch 6/11] mm: Bigrefs -- distributed refcounters Ravikiran G Thirumalai
2005-09-13 16:10 ` [patch 7/11] net: Use bigrefs for net_device.refcount Ravikiran G Thirumalai
2005-09-13 16:26   ` Stephen Hemminger
2005-09-13 16:35     ` Ben Greear
2005-09-13 16:46       ` Stephen Hemminger
2005-09-13 20:26     ` David S. Miller
2005-09-13 22:16       ` Ravikiran G Thirumalai
2005-09-13 18:27   ` Eric Dumazet
2005-09-13 18:53     ` Ravikiran G Thirumalai
2005-09-13 16:12 ` [patch 8/11] net: dst_abstraction macros Ravikiran G Thirumalai
2005-09-13 16:17 ` [patch 9/11] net: dst_entry.refcount, use, lastuse to use alloc_percpu Ravikiran G Thirumalai
2005-09-13 20:24   ` David S. Miller
2005-09-13 22:07     ` Ravikiran G Thirumalai
2005-09-13 22:12       ` David S. Miller
2005-09-13 23:17         ` Ravikiran G Thirumalai
2005-09-13 23:27           ` David S. Miller
2005-09-14  7:21             ` Rusty Russell
2005-09-13 16:18 ` Ravikiran G Thirumalai [this message]
2005-09-13 16:19 ` [patch 11/11] mm: Reimplementation of dynamic per-cpu allocator -- hotplug_alloc_percpu_blocks Ravikiran G Thirumalai

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20050913161806.GL3570@localhost.localdomain \
    --to=kiran@scalex86.org \
    --cc=akpm@osdl.org \
    --cc=bharata@in.ibm.com \
    --cc=dipankar@in.ibm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=rusty@rustcorp.com.au \
    --cc=shai@scalex86.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox