From: Jesper Dangaard Brouer <brouer@redhat.com>
To: linux-mm@kvack.org, Christoph Lameter <cl@linux.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>,
Andrew Morton <akpm@linux-foundation.org>,
Linus Torvalds <torvalds@linux-foundation.org>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Jesper Dangaard Brouer <brouer@redhat.com>
Subject: [PATCH V2 02/11] mm/slab: move SLUB alloc hooks to common mm/slab.h
Date: Tue, 12 Jan 2016 16:13:59 +0100
Message-ID: <20160112151352.31725.50235.stgit@firesoul>
In-Reply-To: <20160112151257.31725.71327.stgit@firesoul>
First step towards sharing the alloc hooks between the SLUB and SLAB
allocators. Move the SLUB allocator's *_alloc_hook() functions to the
common mm/slab.h, which holds internal slab definitions.
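
For context, here is a minimal sketch (not part of this patch) of how an
allocator's allocation path would typically call these now-shared hooks;
example_alloc() and alloc_one_object() are hypothetical placeholder names,
the real call sites live in mm/slub.c and, in later patches, mm/slab.c:

static void *example_alloc(struct kmem_cache *s, gfp_t flags)
{
	void *object;

	/* May substitute a memcg cache, or return NULL on fault injection. */
	s = slab_pre_alloc_hook(s, flags);
	if (!s)
		return NULL;

	/* Allocator-specific fast/slow path (hypothetical helper). */
	object = alloc_one_object(s, flags);

	/* Runs kmemcheck/kmemleak/kasan hooks and drops the memcg cache ref. */
	slab_post_alloc_hook(s, flags, 1, &object);
	return object;
}
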
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
mm/slab.h | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
mm/slub.c | 54 -----------------------------------------------------
2 files changed, 62 insertions(+), 54 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 7b6087197997..92b10da2c71f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -38,6 +38,10 @@ struct kmem_cache {
#endif
#include <linux/memcontrol.h>
+#include <linux/fault-inject.h>
+#include <linux/kmemcheck.h>
+#include <linux/kasan.h>
+#include <linux/kmemleak.h>
/*
* State of the slab allocator.
@@ -319,6 +323,64 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return s;
}
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifndef CONFIG_SLUB
+ return s->object_size;
+
+#else /* CONFIG_SLUB */
+# ifdef CONFIG_SLUB_DEBUG
+ /*
+ * Debugging requires use of the padding between object
+ * and whatever may come after it.
+ */
+ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+ return s->object_size;
+# endif
+ /*
+ * If we have the need to store the freelist pointer
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+ if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+ */
+ return s->size;
+#endif
+}
+
+static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+ gfp_t flags)
+{
+ flags &= gfp_allowed_mask;
+ lockdep_trace_alloc(flags);
+ might_sleep_if(gfpflags_allow_blocking(flags));
+
+ if (should_failslab(s->object_size, flags, s->flags))
+ return NULL;
+
+ return memcg_kmem_get_cache(s, flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+ size_t size, void **p)
+{
+ size_t i;
+
+ flags &= gfp_allowed_mask;
+ for (i = 0; i < size; i++) {
+ void *object = p[i];
+
+ kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+ kmemleak_alloc_recursive(object, s->object_size, 1,
+ s->flags, flags);
+ kasan_slab_alloc(s, object);
+ }
+ memcg_kmem_put_cache(s);
+}
+
#ifndef CONFIG_SLOB
/*
* The slab lists for all objects.
diff --git a/mm/slub.c b/mm/slub.c
index 65d5f92d51d2..9ef1abc683b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -284,30 +284,6 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
return (p - addr) / s->size;
}
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
- /*
- * Debugging requires use of the padding between object
- * and whatever may come after it.
- */
- if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
- return s->object_size;
-
-#endif
- /*
- * If we have the need to store the freelist pointer
- * back there or track user information then we can
- * only use the space before that information.
- */
- if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
- */
- return s->size;
-}
-
static inline int order_objects(int order, unsigned long size, int reserved)
{
return ((PAGE_SIZE << order) - reserved) / size;
@@ -1279,36 +1255,6 @@ static inline void kfree_hook(const void *x)
kasan_kfree_large(x);
}
-static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
- gfp_t flags)
-{
- flags &= gfp_allowed_mask;
- lockdep_trace_alloc(flags);
- might_sleep_if(gfpflags_allow_blocking(flags));
-
- if (should_failslab(s->object_size, flags, s->flags))
- return NULL;
-
- return memcg_kmem_get_cache(s, flags);
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p)
-{
- size_t i;
-
- flags &= gfp_allowed_mask;
- for (i = 0; i < size; i++) {
- void *object = p[i];
-
- kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
- kmemleak_alloc_recursive(object, s->object_size, 1,
- s->flags, flags);
- kasan_slab_alloc(s, object);
- }
- memcg_kmem_put_cache(s);
-}
-
static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
kmemleak_free_recursive(x, s->flags);
--
Thread overview: 31+ messages
2016-01-07 14:03 [PATCH 00/10] MM: More bulk API work Jesper Dangaard Brouer
2016-01-07 14:03 ` [PATCH 01/10] slub: cleanup code for kmem cgroup support to kmem_cache_free_bulk Jesper Dangaard Brouer
2016-01-07 15:54 ` Christoph Lameter
2016-01-07 17:41 ` Jesper Dangaard Brouer
2016-01-08 2:58 ` Joonsoo Kim
2016-01-08 11:05 ` Jesper Dangaard Brouer
2016-01-07 14:03 ` [PATCH 02/10] mm/slab: move SLUB alloc hooks to common mm/slab.h Jesper Dangaard Brouer
2016-01-07 14:03 ` [PATCH 03/10] mm: fault-inject take over bootstrap kmem_cache check Jesper Dangaard Brouer
2016-01-07 14:03 ` [PATCH 04/10] slab: use slab_pre_alloc_hook in SLAB allocator shared with SLUB Jesper Dangaard Brouer
2016-01-08 3:05 ` Joonsoo Kim
2016-01-07 14:03 ` [PATCH 05/10] mm: kmemcheck skip object if slab allocation failed Jesper Dangaard Brouer
2016-01-07 14:04 ` [PATCH 06/10] slab: use slab_post_alloc_hook in SLAB allocator shared with SLUB Jesper Dangaard Brouer
2016-01-07 14:04 ` [PATCH 07/10] slab: implement bulk alloc in SLAB allocator Jesper Dangaard Brouer
2016-01-07 14:04 ` [PATCH 08/10] slab: avoid running debug SLAB code with IRQs disabled for alloc_bulk Jesper Dangaard Brouer
2016-01-07 14:04 ` [PATCH 09/10] slab: implement bulk free in SLAB allocator Jesper Dangaard Brouer
2016-01-07 14:04 ` [PATCH 10/10] mm: new API kfree_bulk() for SLAB+SLUB allocators Jesper Dangaard Brouer
2016-01-08 3:03 ` Joonsoo Kim
2016-01-08 11:20 ` Jesper Dangaard Brouer
2016-01-07 18:54 ` [PATCH 00/10] MM: More bulk API work Linus Torvalds
2016-01-12 15:13 ` [PATCH V2 00/11] " Jesper Dangaard Brouer
2016-01-12 15:13 ` [PATCH V2 01/11] slub: cleanup code for kmem cgroup support to kmem_cache_free_bulk Jesper Dangaard Brouer
2016-01-12 15:13 ` Jesper Dangaard Brouer [this message]
2016-01-12 15:14 ` [PATCH V2 03/11] mm: fault-inject take over bootstrap kmem_cache check Jesper Dangaard Brouer
2016-01-12 15:14 ` [PATCH V2 04/11] slab: use slab_pre_alloc_hook in SLAB allocator shared with SLUB Jesper Dangaard Brouer
2016-01-12 15:14 ` [PATCH V2 05/11] mm: kmemcheck skip object if slab allocation failed Jesper Dangaard Brouer
2016-01-12 15:14 ` [PATCH V2 06/11] slab: use slab_post_alloc_hook in SLAB allocator shared with SLUB Jesper Dangaard Brouer
2016-01-12 15:15 ` [PATCH V2 07/11] slab: implement bulk alloc in SLAB allocator Jesper Dangaard Brouer
2016-01-12 15:15 ` [PATCH V2 08/11] slab: avoid running debug SLAB code with IRQs disabled for alloc_bulk Jesper Dangaard Brouer
2016-01-12 15:15 ` [PATCH V2 09/11] slab: implement bulk free in SLAB allocator Jesper Dangaard Brouer
2016-01-12 15:16 ` [PATCH V2 10/11] mm: new API kfree_bulk() for SLAB+SLUB allocators Jesper Dangaard Brouer
2016-01-12 15:16 ` [PATCH V2 11/11] mm: fix some spelling Jesper Dangaard Brouer