From: Ravikiran G Thirumalai <kiran@scalex86.org>
To: Andrew Morton <akpm@osdl.org>
Cc: linux-kernel@vger.kernel.org, dipankar@in.ibm.com,
bharata@in.ibm.com, shai@scalex86.org,
Rusty Russell <rusty@rustcorp.com.au>
Subject: [patch 11/11] mm: Reimplementation of dynamic per-cpu allocator -- hotplug_alloc_percpu_blocks
Date: Tue, 13 Sep 2005 09:19:14 -0700 [thread overview]
Message-ID: <20050913161914.GM3570@localhost.localdomain> (raw)
In-Reply-To: <20050913155112.GB3570@localhost.localdomain>
Patch to hotplug chunks of memory for alloc_percpu blocks when a cpu comes
up. This is needed when alloc_percpu is used very early, before the
cpu_possible mask is fully set up. In that case, the backing chunks of
memory are allocated later, as cpus come up.
Signed-off-by: Alok N Kataria <alokk@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Index: alloc_percpu-2.6.13/include/linux/percpu.h
===================================================================
--- alloc_percpu-2.6.13.orig/include/linux/percpu.h 2005-09-12 12:23:34.000000000 -0700
+++ alloc_percpu-2.6.13/include/linux/percpu.h 2005-09-12 18:39:42.000000000 -0700
@@ -31,6 +31,7 @@
extern void *__alloc_percpu(size_t size, size_t align, unsigned int gfpflags);
extern void free_percpu(const void *);
+extern void __init alloc_percpu_init(void);
#else /* CONFIG_SMP */
@@ -49,6 +50,8 @@
kfree(ptr);
}
+#define alloc_percpu_init() do {} while (0)
+
#endif /* CONFIG_SMP */
/* Simple wrapper for the common case: zeros memory. */
Index: alloc_percpu-2.6.13/init/main.c
===================================================================
--- alloc_percpu-2.6.13.orig/init/main.c 2005-09-12 12:23:34.000000000 -0700
+++ alloc_percpu-2.6.13/init/main.c 2005-09-12 18:25:05.000000000 -0700
@@ -495,6 +495,9 @@
#endif
vfs_caches_init_early();
mem_init();
+
+ alloc_percpu_init();
+
kmem_cache_init();
setup_per_cpu_pageset();
numa_policy_init();
Index: alloc_percpu-2.6.13/mm/percpu.c
===================================================================
--- alloc_percpu-2.6.13.orig/mm/percpu.c 2005-09-12 12:23:34.000000000 -0700
+++ alloc_percpu-2.6.13/mm/percpu.c 2005-09-12 16:57:49.000000000 -0700
@@ -35,8 +35,9 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
-
#include <linux/sort.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif
@@ -200,8 +201,10 @@
rollback_pages:
j--;
for (; j >= 0; j--)
- if (cpu_possible(j / cpu_pages))
+ if (cpu_possible(j / cpu_pages)) {
__free_pages(blkp->pages[j], 0);
+ blkp->pages[j] = NULL;
+ }
/* Unmap block management */
tmp.addr = area->addr + NR_CPUS * PCPU_BLKSIZE;
@@ -222,6 +225,90 @@
return NULL;
}
+/*
+ * Populate one block's chunk for @cpu: allocate node-local pages and
+ * map them into the block's vm area.  Returns 1 on success (or if the
+ * chunk is already populated), 0 on failure.
+ *
+ * Called with blklist_lock held and irqs off, hence GFP_ATOMIC below.
+ */
+static int __devinit __allocate_chunk(int cpu, struct pcpu_block *blkp)
+{
+	unsigned int cpu_pages = PCPU_BLKSIZE >> PAGE_SHIFT;
+	int start_idx, j;
+	struct vm_struct tmp;
+	struct page **tmppage;
+
+	/* Alloc node local pages for the onlined cpu */
+	start_idx = cpu * cpu_pages;
+
+	if (blkp->pages[start_idx])
+		return 1;	/* Already allocated */
+
+	for (j = start_idx; j < start_idx + cpu_pages; j++) {
+		BUG_ON(blkp->pages[j]);
+		blkp->pages[j] = alloc_pages_node(cpu_to_node(cpu),
+						GFP_ATOMIC |
+						__GFP_HIGHMEM,
+						0);
+		if (unlikely(!blkp->pages[j]))
+			goto rollback_pages;
+	}
+
+	/* Map pages for each cpu by splitting vm_struct for each cpu */
+	tmppage = &blkp->pages[cpu * cpu_pages];
+	tmp.addr = blkp->start_addr + cpu * PCPU_BLKSIZE;
+	/* map_vm_area assumes a guard page of size PAGE_SIZE */
+	tmp.size = PCPU_BLKSIZE + PAGE_SIZE;
+	if (map_vm_area(&tmp, PAGE_KERNEL, &tmppage))
+		goto rollback_pages;
+
+	return 1;	/* Success */
+
+rollback_pages:
+	/*
+	 * Free only the pages this call allocated.  Slots below
+	 * start_idx belong to other cpus' chunks (or are NULL) and
+	 * must not be freed here.
+	 */
+	j--;
+	for (; j >= start_idx; j--) {
+		__free_pages(blkp->pages[j], 0);
+		blkp->pages[j] = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Allocate chunks for this cpu in all blocks.  Returns 1 on success,
+ * 0 if any block could not be populated (chunks populated in earlier
+ * blocks on this walk are left in place).
+ */
+static int __devinit allocate_chunk(int cpu)
+{
+ struct pcpu_block *blkp = NULL;
+ unsigned long flags;
+ /*
+ * blklist_lock is held across the whole walk; this is why
+ * __allocate_chunk() must allocate with GFP_ATOMIC.
+ */
+ spin_lock_irqsave(&blklist_lock, flags);
+ list_for_each_entry(blkp, &blkhead, blklist) {
+ if (!__allocate_chunk(cpu, blkp)) {
+ spin_unlock_irqrestore(&blklist_lock, flags);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&blklist_lock, flags);
+ return 1;
+}
+
+
+/*
+ * CPU hotplug callback: populate the percpu backing chunks for a cpu
+ * at CPU_UP_PREPARE time.  Returning NOTIFY_BAD on allocation failure
+ * prevents the cpu from coming online.
+ */
+static int __devinit alloc_percpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (!allocate_chunk(cpu))
+ return NOTIFY_BAD;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __devinitdata alloc_percpu_nb = {
+ .notifier_call = alloc_percpu_notify,
+};
+
+/*
+ * Register the hotplug notifier so per-cpu chunks get backed when
+ * cpus come up.  Called early from init/main.c, right after mem_init().
+ */
+void __init alloc_percpu_init(void)
+{
+ register_cpu_notifier(&alloc_percpu_nb);
+}
+
/* Free memory block allocated by valloc_percpu */
static void vfree_percpu(void *addr)
{
prev parent reply other threads:[~2005-09-13 16:19 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2005-09-13 15:51 [patch 0/11] mm: Reimplementation of dynamic per-cpu allocator Ravikiran G Thirumalai
2005-09-13 15:54 ` [patch 1/11] mm: Reimplementation of dynamic per-cpu allocator -- vmalloc_fixup Ravikiran G Thirumalai
2005-09-13 19:25 ` Andrew Morton
2005-09-13 15:56 ` [patch 2/11] mm: Reimplementation of dynamic per-cpu allocator -- alloc_percpu Ravikiran G Thirumalai
2005-09-13 15:58 ` [patch 3/11] mm: Reimplementation of dynamic per-cpu allocator -- alloc_percpu_atomic Ravikiran G Thirumalai
2005-09-13 15:59 ` [patch 4/11] mm: Reimplementation of dynamic per-cpu allocator -- change_alloc_percpu_users Ravikiran G Thirumalai
2005-09-13 16:01 ` [patch 5/11] mm: Bigrefs -- add_getcpuptr Ravikiran G Thirumalai
2005-09-13 16:04 ` [patch 6/11] mm: Bigrefs -- distributed refcounters Ravikiran G Thirumalai
2005-09-13 16:10 ` [patch 7/11] net: Use bigrefs for net_device.refcount Ravikiran G Thirumalai
2005-09-13 16:26 ` Stephen Hemminger
2005-09-13 16:35 ` Ben Greear
2005-09-13 16:46 ` Stephen Hemminger
2005-09-13 20:26 ` David S. Miller
2005-09-13 22:16 ` Ravikiran G Thirumalai
2005-09-13 18:27 ` Eric Dumazet
2005-09-13 18:53 ` Ravikiran G Thirumalai
2005-09-13 16:12 ` [patch 8/11] net: dst_abstraction macros Ravikiran G Thirumalai
2005-09-13 16:17 ` [patch 9/11] net: dst_entry.refcount, use, lastuse to use alloc_percpu Ravikiran G Thirumalai
2005-09-13 20:24 ` David S. Miller
2005-09-13 22:07 ` Ravikiran G Thirumalai
2005-09-13 22:12 ` David S. Miller
2005-09-13 23:17 ` Ravikiran G Thirumalai
2005-09-13 23:27 ` David S. Miller
2005-09-14 7:21 ` Rusty Russell
2005-09-13 16:18 ` [patch 10/11] mm: Reimplementation of dynamic per-cpu allocator -- allow_early_mapvmarea Ravikiran G Thirumalai
2005-09-13 16:19 ` Ravikiran G Thirumalai [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20050913161914.GM3570@localhost.localdomain \
--to=kiran@scalex86.org \
--cc=akpm@osdl.org \
--cc=bharata@in.ibm.com \
--cc=dipankar@in.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=rusty@rustcorp.com.au \
--cc=shai@scalex86.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox