From: Christoph Lameter <cl@linux.com>
To: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org, David Rientjes <rientjes@google.com>,
Matt Mackall <mpm@selenic.com>,
Glauber Costa <glommer@parallels.com>,
Joonsoo Kim <js1304@gmail.com>
Subject: Common [20/20] Common alignment code
Date: Wed, 13 Jun 2012 10:25:11 -0500 [thread overview]
Message-ID: <20120613152526.181228151@linux.com> (raw)
In-Reply-To: 20120613152451.465596612@linux.com
[-- Attachment #1: common_alignment --]
[-- Type: text/plain, Size: 8314 bytes --]
Extract the code to do object alignment from the allocators.
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slab.c | 22 +---------------------
mm/slab.h | 3 +++
mm/slab_common.c | 30 +++++++++++++++++++++++++++++-
mm/slob.c | 11 -----------
mm/slub.c | 45 ++++++++-------------------------------------
5 files changed, 41 insertions(+), 70 deletions(-)
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c 2012-06-13 03:44:57.145477117 -0500
+++ linux-2.6/mm/slab.c 2012-06-13 03:45:03.621476983 -0500
@@ -1439,7 +1439,7 @@ struct kmem_cache *create_kmalloc_cache(
s->name = name;
s->size = s->object_size = size;
- s->align = ARCH_KMALLOC_MINALIGN;
+ s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
s->flags = flags | ARCH_KMALLOC_FLAGS;
r = __kmem_cache_create(s);
@@ -2217,22 +2217,6 @@ int __kmem_cache_create(struct kmem_cach
size &= ~(BYTES_PER_WORD - 1);
}
- /* calculate the final buffer alignment: */
-
- /* 1) arch recommendation: can be overridden for debug */
- if (flags & SLAB_HWCACHE_ALIGN) {
- /*
- * Default alignment: as specified by the arch code. Except if
- * an object is really small, then squeeze multiple objects into
- * one cacheline.
- */
- ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- } else {
- ralign = BYTES_PER_WORD;
- }
-
/*
* Redzoning and user store require word alignment or possibly larger.
* Note this will be overridden by architecture or caller mandated
@@ -2249,10 +2233,6 @@ int __kmem_cache_create(struct kmem_cach
size &= ~(REDZONE_ALIGN - 1);
}
- /* 2) arch mandated alignment */
- if (ralign < ARCH_SLAB_MINALIGN) {
- ralign = ARCH_SLAB_MINALIGN;
- }
/* 3) caller mandated alignment */
if (ralign < cachep->align) {
ralign = cachep->align;
Index: linux-2.6/mm/slab_common.c
===================================================================
--- linux-2.6.orig/mm/slab_common.c 2012-06-13 03:44:57.201477116 -0500
+++ linux-2.6/mm/slab_common.c 2012-06-13 03:45:03.621476983 -0500
@@ -25,6 +25,34 @@ DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size)
+{
+ /*
+ * If the user wants hardware cache aligned objects then follow that
+ * suggestion if the object is sufficiently large.
+ *
+ * The hardware cache alignment cannot override the specified
+ * alignment though. If that is greater then use it.
+ */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ unsigned long ralign = cache_line_size();
+ while (size <= ralign / 2)
+ ralign /= 2;
+ align = max(align, ralign);
+ }
+
+ if (align < ARCH_SLAB_MINALIGN)
+ align = ARCH_SLAB_MINALIGN;
+
+ return ALIGN(align, sizeof(void *));
+}
+
+
+/*
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
@@ -118,7 +146,7 @@ struct kmem_cache *kmem_cache_create(con
s->size = s->object_size = size;
s->ctor = ctor;
s->flags = flags;
- s->align = align;
+ s->align = calculate_alignment(flags, align, size);
r = __kmem_cache_create(s);
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c 2012-06-13 03:44:57.125477118 -0500
+++ linux-2.6/mm/slob.c 2012-06-13 03:45:03.621476983 -0500
@@ -124,7 +124,6 @@ static inline void clear_slob_page_free(
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
/*
* struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -510,20 +509,10 @@ EXPORT_SYMBOL(ksize);
int __kmem_cache_create(struct kmem_cache *c)
{
- int align = c->align;
-
if (c->flags & SLAB_DESTROY_BY_RCU) {
/* leave room for rcu footer at the end of object */
c->size += sizeof(struct slob_rcu);
}
- /* ignore alignment unless it's forced */
- c->align = (c->flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
- if (c->align < ARCH_SLAB_MINALIGN)
- c->align = ARCH_SLAB_MINALIGN;
- if (c->align < align)
- c->align = align;
-
- kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
return 0;
}
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2012-06-13 03:44:57.161477117 -0500
+++ linux-2.6/mm/slub.c 2012-06-13 03:45:03.625476983 -0500
@@ -2737,32 +2737,6 @@ static inline int calculate_order(int si
return -ENOSYS;
}
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
- unsigned long align, unsigned long size)
-{
- /*
- * If the user wants hardware cache aligned objects then follow that
- * suggestion if the object is sufficiently large.
- *
- * The hardware cache alignment cannot override the specified
- * alignment though. If that is greater then use it.
- */
- if (flags & SLAB_HWCACHE_ALIGN) {
- unsigned long ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- align = max(align, ralign);
- }
-
- if (align < ARCH_SLAB_MINALIGN)
- align = ARCH_SLAB_MINALIGN;
-
- return ALIGN(align, sizeof(void *));
-}
-
static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
@@ -2968,14 +2942,6 @@ static int calculate_sizes(struct kmem_c
#endif
/*
- * Determine the alignment based on various parameters that the
- * user specified and the dynamic determination of cache line size
- * on bootup.
- */
- align = calculate_alignment(flags, align, s->object_size);
- s->align = align;
-
- /*
* SLUB stores one object immediately after another beginning from
* offset 0. In order to align the objects we have to simply size
* each object to conform to the alignment.
@@ -3009,7 +2975,6 @@ static int calculate_sizes(struct kmem_c
s->max = s->oo;
return !!oo_objects(s->oo);
-
}
static int kmem_cache_open(struct kmem_cache *s)
@@ -3233,7 +3198,7 @@ static struct kmem_cache *__init create_
s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
s->name = name;
s->size = s->object_size = size;
- s->align = ARCH_KMALLOC_MINALIGN;
+ s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
s->flags = flags;
/*
@@ -3694,6 +3659,8 @@ void __init kmem_cache_init(void)
kmem_cache_node->name = "kmem_cache_node";
kmem_cache_node->size = kmem_cache_node->object_size = sizeof(struct kmem_cache_node);
kmem_cache_node->flags = SLAB_HWCACHE_ALIGN;
+ kmem_cache_node->align = calculate_alignment(SLAB_HWCACHE_ALIGN,
+ 0, sizeof(struct kmem_cache_node));
r = kmem_cache_open(kmem_cache_node);
if (r)
@@ -3708,6 +3675,8 @@ void __init kmem_cache_init(void)
kmem_cache->name = "kmem_cache";
kmem_cache->size = kmem_cache->object_size = kmem_size;
kmem_cache->flags = SLAB_HWCACHE_ALIGN;
+ kmem_cache->align = calculate_alignment(SLAB_HWCACHE_ALIGN,
+ 0, sizeof(struct kmem_cache));
r = kmem_cache_open(kmem_cache);
if (r)
@@ -3931,7 +3900,9 @@ struct kmem_cache *__kmem_cache_alias(co
int __kmem_cache_create(struct kmem_cache *s)
{
- int r = kmem_cache_open(s);
+ int r;
+
+ r = kmem_cache_open(s);
if (r)
return r;
Index: linux-2.6/mm/slab.h
===================================================================
--- linux-2.6.orig/mm/slab.h 2012-06-13 03:44:57.177477117 -0500
+++ linux-2.6/mm/slab.h 2012-06-13 03:45:03.625476983 -0500
@@ -32,6 +32,9 @@ extern struct list_head slab_caches;
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
+unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size);
+
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *s);
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2012-06-13 15:25 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-06-13 15:24 Common [00/20] Sl[auo]b: Common code rework V5 (for merge) Christoph Lameter
2012-06-13 15:24 ` Common [01/20] [slob] Define page struct fields used in mm_types.h Christoph Lameter
2012-06-13 15:24 ` Common [03/20] [slob] Remove various small accessors Christoph Lameter
2012-06-13 15:24 ` Common [04/20] [slab] Use page struct fields instead of casting Christoph Lameter
2012-06-13 15:24 ` Common [05/20] [slab] Remove some accessors Christoph Lameter
2012-06-13 15:24 ` Common [06/20] Extract common fields from struct kmem_cache Christoph Lameter
2012-06-13 15:24 ` Common [07/20] [slab] Get rid of obj_size macro Christoph Lameter
2012-06-13 15:24 ` Common [08/20] Extract common code for kmem_cache_create() Christoph Lameter
2012-06-14 8:15 ` Glauber Costa
2012-06-14 14:18 ` Christoph Lameter
2012-06-14 14:20 ` Glauber Costa
2012-06-14 14:46 ` Christoph Lameter
2012-06-13 15:25 ` Common [09/20] Common definition for boot state of the slab allocators Christoph Lameter
2012-06-13 15:25 ` Common [10/20] Use a common mutex definition Christoph Lameter
2012-06-13 15:25 ` Common [11/20] Move kmem_cache_create mutex handling to common code Christoph Lameter
2012-06-13 15:25 ` Common [13/20] Extract a common function for kmem_cache_destroy Christoph Lameter
2012-06-13 15:25 ` Common [14/20] Always use the name "kmem_cache" for the slab cache with the kmem_cache structure Christoph Lameter
2012-06-14 8:16 ` Glauber Costa
2012-06-14 14:12 ` Christoph Lameter
2012-06-13 15:25 ` Common [16/20] Get rid of __kmem_cache_destroy Christoph Lameter
2012-06-13 15:25 ` Common [17/20] Move duping of slab name to slab_common.c Christoph Lameter
2012-06-14 8:19 ` Glauber Costa
2012-06-13 15:25 ` Common [18/20] Do slab aliasing call from common code Christoph Lameter
2012-06-13 15:25 ` Common [19/20] Allocate kmem_cache structure in slab_common.c Christoph Lameter
2012-06-14 10:16 ` Pekka Enberg
2012-06-14 10:34 ` Glauber Costa
2012-06-14 18:14 ` Christoph Lameter
2012-06-20 7:07 ` Pekka Enberg
2012-06-13 15:25 ` Christoph Lameter [this message]
2012-06-14 8:14 ` Common [00/20] Sl[auo]b: Common code rework V5 (for merge) Glauber Costa
2012-06-14 14:03 ` Christoph Lameter
-- strict thread matches above, loose matches on Subject: below --
2012-06-01 19:52 Common [00/20] Sl[auo]b: Common code rework V4 Christoph Lameter
2012-06-01 19:53 ` Common [20/20] Common alignment code Christoph Lameter
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20120613152526.181228151@linux.com \
--to=cl@linux.com \
--cc=glommer@parallels.com \
--cc=js1304@gmail.com \
--cc=linux-mm@kvack.org \
--cc=mpm@selenic.com \
--cc=penberg@kernel.org \
--cc=rientjes@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).