From: Christoph Lameter <cl@linux.com>
To: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: David Rientjes <rientjes@google.com>,
Hugh Dickins <hughd@google.com>,
Eric Dumazet <eric.dumazet@gmail.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
linux-mm@kvack.org
Subject: [slubllv3 12/21] slub: explicit list_lock taking
Date: Fri, 15 Apr 2011 15:12:58 -0500
Message-ID: <20110415201301.937368741@linux.com>
In-Reply-To: <20110415201246.096634892@linux.com>
The allocator fastpath rework changes how the list_lock is used.
Remove the list_lock taking from the support functions that currently
hide it from their callers and instead take the lock explicitly in the
critical sections themselves.
This in turn simplifies the support functions (no __ variants are needed
anymore) and simplifies the lock handling during bootstrap.
Signed-off-by: Christoph Lameter <cl@linux.com>
---
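Note (not part of the patch): the change follows the pattern sketched
below. This is a minimal standalone illustration, assuming a simplified
node structure and a pthread mutex in place of the kernel spinlock; the
names are made up for the example and do not match mm/slub.c.

	#include <pthread.h>
	#include <stdio.h>

	struct node {
		pthread_mutex_t list_lock;
		int nr_partial;		/* stands in for the partial list */
	};

	/* Before: the helper takes list_lock itself, hiding it from callers. */
	static void add_partial_hidden(struct node *n)
	{
		pthread_mutex_lock(&n->list_lock);
		n->nr_partial++;	/* list manipulation */
		pthread_mutex_unlock(&n->list_lock);
	}

	/* After: the helper only manipulates the list... */
	static void add_partial(struct node *n)
	{
		n->nr_partial++;	/* list manipulation only */
	}

	/* ...and each caller makes the critical section explicit. */
	static void unfreeze(struct node *n)
	{
		pthread_mutex_lock(&n->list_lock);
		add_partial(n);
		pthread_mutex_unlock(&n->list_lock);
	}

	int main(void)
	{
		struct node n = { .list_lock = PTHREAD_MUTEX_INITIALIZER,
				  .nr_partial = 0 };

		add_partial_hidden(&n);
		unfreeze(&n);
		printf("nr_partial = %d\n", n.nr_partial);	/* prints 2 */
		return 0;
	}

With the lock visible at the call sites, callers that already run under
list_lock no longer need a separate unlocked __ variant of the helper.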
mm/slub.c | 74 ++++++++++++++++++++++++++++++--------------------------------
1 file changed, 36 insertions(+), 38 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-04-15 13:14:51.000000000 -0500
+++ linux-2.6/mm/slub.c 2011-04-15 13:14:54.000000000 -0500
@@ -905,25 +905,21 @@ static inline void slab_free_hook(struct
/*
* Tracking of fully allocated slabs for debugging purposes.
*/
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
{
- spin_lock(&n->list_lock);
+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
list_add(&page->lru, &n->full);
- spin_unlock(&n->list_lock);
}
static void remove_full(struct kmem_cache *s, struct page *page)
{
- struct kmem_cache_node *n;
-
if (!(s->flags & SLAB_STORE_USER))
return;
- n = get_node(s, page_to_nid(page));
-
- spin_lock(&n->list_lock);
list_del(&page->lru);
- spin_unlock(&n->list_lock);
}
/* Tracking of the number of slabs for debugging purposes */
@@ -1048,8 +1044,13 @@ static noinline int free_debug_processin
}
/* Special debug activities for freeing objects */
- if (!page->frozen && !page->freelist)
+ if (!page->frozen && !page->freelist) {
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+ spin_lock(&n->list_lock);
remove_full(s, page);
+ spin_unlock(&n->list_lock);
+ }
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0);
@@ -1400,36 +1401,26 @@ static __always_inline int slab_trylock(
/*
* Management of partially allocated slabs
*/
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
- spin_lock(&n->list_lock);
n->nr_partial++;
if (tail)
list_add_tail(&page->lru, &n->partial);
else
list_add(&page->lru, &n->partial);
- spin_unlock(&n->list_lock);
}
-static inline void __remove_partial(struct kmem_cache_node *n,
+static inline void remove_partial(struct kmem_cache_node *n,
struct page *page)
{
list_del(&page->lru);
n->nr_partial--;
}
-static void remove_partial(struct kmem_cache *s, struct page *page)
-{
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
- spin_lock(&n->list_lock);
- __remove_partial(n, page);
- spin_unlock(&n->list_lock);
-}
-
/*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
*
* Must hold list_lock.
*/
@@ -1437,7 +1428,7 @@ static inline int lock_and_freeze_slab(s
struct page *page)
{
if (slab_trylock(page)) {
- __remove_partial(n, page);
+ remove_partial(n, page);
return 1;
}
return 0;
@@ -1554,12 +1545,17 @@ static void unfreeze_slab(struct kmem_ca
if (page->inuse) {
if (page->freelist) {
+ spin_lock(&n->list_lock);
add_partial(n, page, tail);
+ spin_unlock(&n->list_lock);
stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
} else {
stat(s, DEACTIVATE_FULL);
- if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
- add_full(n, page);
+ if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) {
+ spin_lock(&n->list_lock);
+ add_full(s, n, page);
+ spin_unlock(&n->list_lock);
+ }
}
slab_unlock(page);
} else {
@@ -1575,7 +1571,9 @@ static void unfreeze_slab(struct kmem_ca
* kmem_cache_shrink can reclaim any empty slabs from
* the partial list.
*/
+ spin_lock(&n->list_lock);
add_partial(n, page, 1);
+ spin_unlock(&n->list_lock);
slab_unlock(page);
} else {
slab_unlock(page);
@@ -2131,7 +2129,11 @@ static void __slab_free(struct kmem_cach
* then add it.
*/
if (unlikely(!prior)) {
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+ spin_lock(&n->list_lock);
add_partial(get_node(s, page_to_nid(page)), page, 1);
+ spin_unlock(&n->list_lock);
stat(s, FREE_ADD_PARTIAL);
}
@@ -2147,7 +2149,11 @@ slab_empty:
/*
* Slab still on the partial list.
*/
- remove_partial(s, page);
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+ spin_lock(&n->list_lock);
+ remove_partial(n, page);
+ spin_unlock(&n->list_lock);
stat(s, FREE_REMOVE_PARTIAL);
}
slab_unlock(page);
@@ -2449,7 +2455,6 @@ static void early_kmem_cache_node_alloc(
{
struct page *page;
struct kmem_cache_node *n;
- unsigned long flags;
BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
@@ -2476,14 +2481,7 @@ static void early_kmem_cache_node_alloc(
init_kmem_cache_node(n, kmem_cache_node);
inc_slabs_node(kmem_cache_node, node, page->objects);
- /*
- * lockdep requires consistent irq usage for each lock
- * so even though there cannot be a race this early in
- * the boot sequence, we still disable irqs.
- */
- local_irq_save(flags);
add_partial(n, page, 0);
- local_irq_restore(flags);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2767,7 +2765,7 @@ static void free_partial(struct kmem_cac
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- __remove_partial(n, page);
+ remove_partial(n, page);
discard_slab(s, page);
} else {
list_slab_objects(s, page,
@@ -3105,7 +3103,7 @@ int kmem_cache_shrink(struct kmem_cache
* may have freed the last object and be
* waiting to release the slab.
*/
- __remove_partial(n, page);
+ remove_partial(n, page);
slab_unlock(page);
discard_slab(s, page);
} else {
--