From: Tejun Heo <teheo@suse.de>
To: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux-foundation.org>,
Pekka Enberg <penberg@cs.helsinki.fi>,
linux-mm@kvack.org, Nick Piggin <npiggin@suse.de>,
Matt Mackall <mpm@selenic.com>
Subject: [S+Q 02/16] [PATCH 1/2 UPDATED] percpu: make @dyn_size always mean min dyn_size in first chunk init functions
Date: Sun, 27 Jun 2010 18:57:29 +0200
Message-ID: <4C2782F9.6030803@suse.de>
In-Reply-To: <4C270A09.3070305@kernel.org>
In pcpu_build_alloc_info() and pcpu_embed_first_chunk(), @dyn_size was
an ssize_t: -1 meant auto-size, 0 forced a zero-sized dynamic area, and
a positive value meant a minimum size. There is no use case for forcing
zero, and the upcoming early alloc support always requires a non-zero
dynamic size. Make @dyn_size a size_t that always means the minimum
dyn_size.
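For illustration (not part of the patch), here is a minimal userspace
sketch of the new sizing rule, mirroring the arithmetic this patch adds
to pcpu_build_alloc_info(): the numbers are made up, a 4k PAGE_SIZE is
assumed, and PFN_ALIGN is re-defined locally just for the example.

  #include <stdio.h>

  #define PAGE_SIZE	4096UL
  #define PFN_ALIGN(x)	(((unsigned long)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

  int main(void)
  {
  	unsigned long static_size   = 45000;	/* .data..percpu, example value */
  	unsigned long reserved_size = 8192;	/* reserved first chunk area */
  	unsigned long dyn_size      = 20480;	/* caller's requested minimum */

  	/* page-align the whole first chunk... */
  	unsigned long size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);

  	/* ...and hand the alignment slack back to the dynamic area */
  	dyn_size = size_sum - static_size - reserved_size;

  	/* prints size_sum=73728 dyn_size=20536 - never less than requested */
  	printf("size_sum=%lu dyn_size=%lu\n", size_sum, dyn_size);
  	return 0;
  }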
While at it, make pcpu_build_alloc_info() static, as suggested by David
Rientjes; it has no external callers.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David Rientjes <rientjes@google.com>
---
Here's the updated patch. I've pushed out this and the second patch
to linux-next. Please feel free to pull from the following git tree.
git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git for-next
I'll apply 09 once it's decided how it will be handled.
Thanks.
include/linux/percpu.h | 7 +------
mm/percpu.c | 35 ++++++++++-------------------------
2 files changed, 11 insertions(+), 31 deletions(-)
Index: work/include/linux/percpu.h
===================================================================
--- work.orig/include/linux/percpu.h
+++ work/include/linux/percpu.h
@@ -104,16 +104,11 @@ extern struct pcpu_alloc_info * __init p
int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
-extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
- size_t reserved_size, ssize_t dyn_size,
- size_t atom_size,
- pcpu_fc_cpu_distance_fn_t cpu_distance_fn);
-
extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
-extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
+extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
pcpu_fc_alloc_fn_t alloc_fn,
Index: work/mm/percpu.c
===================================================================
--- work.orig/mm/percpu.c
+++ work/mm/percpu.c
@@ -1013,20 +1013,6 @@ phys_addr_t per_cpu_ptr_to_phys(void *ad
return page_to_phys(pcpu_addr_to_page(addr));
}
-static inline size_t pcpu_calc_fc_sizes(size_t static_size,
- size_t reserved_size,
- ssize_t *dyn_sizep)
-{
- size_t size_sum;
-
- size_sum = PFN_ALIGN(static_size + reserved_size +
- (*dyn_sizep >= 0 ? *dyn_sizep : 0));
- if (*dyn_sizep != 0)
- *dyn_sizep = size_sum - static_size - reserved_size;
-
- return size_sum;
-}
-
/**
* pcpu_alloc_alloc_info - allocate percpu allocation info
* @nr_groups: the number of groups
@@ -1085,7 +1071,7 @@ void __init pcpu_free_alloc_info(struct
/**
* pcpu_build_alloc_info - build alloc_info considering distances between CPUs
* @reserved_size: the size of reserved percpu area in bytes
- * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @dyn_size: minimum free size for dynamic allocation in bytes
* @atom_size: allocation atom size
* @cpu_distance_fn: callback to determine distance between cpus, optional
*
@@ -1103,8 +1089,8 @@ void __init pcpu_free_alloc_info(struct
* On success, pointer to the new allocation_info is returned. On
* failure, ERR_PTR value is returned.
*/
-struct pcpu_alloc_info * __init pcpu_build_alloc_info(
- size_t reserved_size, ssize_t dyn_size,
+static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
+ size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
@@ -1123,13 +1109,15 @@ struct pcpu_alloc_info * __init pcpu_bui
memset(group_map, 0, sizeof(group_map));
memset(group_cnt, 0, sizeof(group_cnt));
+ size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);
+ dyn_size = size_sum - static_size - reserved_size;
+
/*
* Determine min_unit_size, alloc_size and max_upa such that
* alloc_size is multiple of atom_size and is the smallest
* which can accomodate 4k aligned segments which are equal to
* or larger than min_unit_size.
*/
- size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
alloc_size = roundup(min_unit_size, atom_size);
@@ -1532,7 +1520,7 @@ early_param("percpu_alloc", percpu_alloc
/**
* pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
* @reserved_size: the size of reserved percpu area in bytes
- * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @dyn_size: minimum free size for dynamic allocation in bytes
* @atom_size: allocation atom size
* @cpu_distance_fn: callback to determine distance between cpus, optional
* @alloc_fn: function to allocate percpu page
@@ -1553,10 +1541,7 @@ early_param("percpu_alloc", percpu_alloc
* vmalloc space is not orders of magnitude larger than distances
* between node memory addresses (ie. 32bit NUMA machines).
*
- * When @dyn_size is positive, dynamic area might be larger than
- * specified to fill page alignment. When @dyn_size is auto,
- * @dyn_size is just big enough to fill page alignment after static
- * and reserved areas.
+ * @dyn_size specifies the minimum dynamic area size.
*
* If the needed size is smaller than the minimum or specified unit
* size, the leftover is returned using @free_fn.
@@ -1564,7 +1549,7 @@ early_param("percpu_alloc", percpu_alloc
* RETURNS:
* 0 on success, -errno on failure.
*/
-int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
+int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
pcpu_fc_alloc_fn_t alloc_fn,
@@ -1695,7 +1680,7 @@ int __init pcpu_page_first_chunk(size_t
snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
- ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
+ ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
if (IS_ERR(ai))
return PTR_ERR(ai);
BUG_ON(ai->nr_groups != 1);
--