From: Christoph Lameter <cl@linux.com>
To: akpm@linuxfoundation.org
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
penberg@kernel.org, iamjoonsoo@lge.com,
Jesper Dangaard Brouer <brouer@redhat.com>
Subject: [PATCH 2/3] slub: Support for array operations
Date: Tue, 10 Feb 2015 13:48:06 -0600 [thread overview]
Message-ID: <20150210194811.902155759@linux.com> (raw)
In-Reply-To: <20150210194804.288708936@linux.com>
[-- Attachment #1: array_alloc_slub --]
[-- Type: text/plain, Size: 5379 bytes --]
The major portions are in place, but there is no support yet for
directly allocating per-cpu objects. There could also be more
sophisticated code to exploit the batch freeing.
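
For illustration, a caller of the array API proposed in patch 1/3 could look
roughly like the sketch below. This is not part of the patch: example_cache
and process_object() are made-up stand-ins, and kmem_cache_alloc_array() is
assumed to take (cache, gfp, nr, array) and to return the number of objects
it actually placed in the array.

#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical cache and per-object work, for illustration only. */
static struct kmem_cache *example_cache;
static void process_object(void *obj);

static void example_batch(void)
{
	void *objs[16];
	int nr, i;

	/* One call fills the array; fewer objects than requested may come back. */
	nr = kmem_cache_alloc_array(example_cache, GFP_KERNEL,
				    ARRAY_SIZE(objs), objs);

	for (i = 0; i < nr; i++)
		process_object(objs[i]);

	/* Return the whole batch; this ends up in kmem_cache_free_array(). */
	kmem_cache_free_array(example_cache, nr, objs);
}

On the allocation side such a caller lets slub take n->list_lock once per
batch; the free side still goes through __slab_free() per object, which is
what the note above about more sophisticated batch freeing refers to.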
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -110,4 +110,5 @@ static inline void sysfs_slab_remove(str
}
#endif
+#define _HAVE_SLAB_ALLOCATOR_ARRAY_OPERATIONS
#endif /* _LINUX_SLUB_DEF_H */
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1379,13 +1379,9 @@ static void setup_object(struct kmem_cac
s->ctor(object);
}
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+static struct page *__new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
- void *start;
- void *p;
- int order;
- int idx;
if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
@@ -1394,33 +1390,42 @@ static struct page *new_slab(struct kmem
page = allocate_slab(s,
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
- if (!page)
- goto out;
+ if (page) {
+ inc_slabs_node(s, page_to_nid(page), page->objects);
+ page->slab_cache = s;
+ __SetPageSlab(page);
+ if (page->pfmemalloc)
+ SetPageSlabPfmemalloc(page);
+ }
- order = compound_order(page);
- inc_slabs_node(s, page_to_nid(page), page->objects);
- page->slab_cache = s;
- __SetPageSlab(page);
- if (page->pfmemalloc)
- SetPageSlabPfmemalloc(page);
-
- start = page_address(page);
-
- if (unlikely(s->flags & SLAB_POISON))
- memset(start, POISON_INUSE, PAGE_SIZE << order);
-
- for_each_object_idx(p, idx, s, start, page->objects) {
- setup_object(s, page, p);
- if (likely(idx < page->objects))
- set_freepointer(s, p, p + s->size);
- else
- set_freepointer(s, p, NULL);
- }
-
- page->freelist = start;
- page->inuse = page->objects;
- page->frozen = 1;
-out:
+ return page;
+}
+
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+ struct page *page = __new_slab(s, flags, node);
+
+ if (page) {
+ void *p;
+ int idx;
+ void *start = page_address(page);
+
+ if (unlikely(s->flags & SLAB_POISON))
+ memset(start, POISON_INUSE,
+ PAGE_SIZE << compound_order(page));
+
+ for_each_object_idx(p, idx, s, start, page->objects) {
+ setup_object(s, page, p);
+ if (likely(idx < page->objects))
+ set_freepointer(s, p, p + s->size);
+ else
+ set_freepointer(s, p, NULL);
+ }
+
+ page->freelist = start;
+ page->inuse = page->objects;
+ page->frozen = 1;
+ }
return page;
}
@@ -2516,8 +2521,78 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trac
#endif
#endif
+int slab_array_alloc_from_partial(struct kmem_cache *s,
+ size_t nr, void **p)
+{
+ void **end = p + nr;
+ struct kmem_cache_node *n = get_node(s, numa_mem_id());
+ int allocated = 0;
+ unsigned long flags;
+ struct page *page, *page2;
+
+ if (!n->nr_partial)
+ return 0;
+
+
+ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry_safe(page, page2, &n->partial, lru) {
+ void *freelist;
+
+ if (page->objects - page->inuse > end - p)
+ /* More objects free in page than we want */
+ break;
+ list_del(&page->lru);
+ slab_lock(page);
+ freelist = page->freelist;
+ page->inuse = page->objects;
+ page->freelist = NULL;
+ slab_unlock(page);
+ /* Grab all available objects */
+ while (freelist) {
+ *p++ = freelist;
+ freelist = get_freepointer(s, freelist);
+ allocated++;
+ }
+ }
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ return allocated;
+}
+
+int slab_array_alloc_from_page_allocator(struct kmem_cache *s,
+ gfp_t flags, size_t nr, void **p)
+{
+ void **end = p + nr;
+ int allocated = 0;
+
+ while (end - p >= oo_objects(s->oo)) {
+ struct page *page = __new_slab(s, flags, NUMA_NO_NODE);
+ void *q = page_address(page);
+ int i;
+
+ /* Use all the objects */
+ for (i = 0; i < page->objects; i++) {
+ setup_object(s, page, q);
+ *p++ = q;
+ q += s->size;
+ }
+
+ page->inuse = page->objects;
+ page->freelist = NULL;
+ allocated += page->objects;
+ }
+ return allocated;
+}
+
+int slab_array_alloc_from_local(struct kmem_cache *s,
+ size_t nr, void **p)
+{
+ /* Go for the per cpu partials list first */
+ /* Use the cpu_slab if objects are still needed */
+ return 0;
+}
+
/*
- * Slow patch handling. This may still be called frequently since objects
+ * Slow path handling. This may still be called frequently since objects
* have a longer lifetime than the cpu slabs in most processing loads.
*
* So we still attempt to reduce cache line usage. Just take the slab
@@ -2637,6 +2712,14 @@ slab_empty:
discard_slab(s, page);
}
+void kmem_cache_free_array(struct kmem_cache *s, size_t nr, void **p)
+{
+ void **end = p + nr;
+
+ for ( ; p < end; p++)
+ __slab_free(s, virt_to_head_page(*p), *p, 0);
+}
+
/*
* Fastpath with forced inlining to produce a kfree and kmem_cache_free that
* can perform fastpath freeing without additional function calls.
--