From: Christoph Lameter <cl@linux.com>
To: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: David Rientjes <rientjes@google.com>,
Eric Dumazet <eric.dumazet@gmail.com>,
"H. Peter Anvin" <hpa@zytor.com>,
linux-mm@kvack.org, Thomas Gleixner <tglx@linutronix.de>
Subject: [slubllv5 10/25] slub: explicit list_lock taking
Date: Mon, 16 May 2011 15:26:15 -0500
Message-ID: <20110516202626.965065592@linux.com>
In-Reply-To: <20110516202605.274023469@linux.com>
The allocator fastpath rework changes the usage of the list_lock.
Remove the list_lock taking from the helper functions that hid it from
the critical sections and take the lock explicitly in those critical
sections instead.

This in turn simplifies the support functions (no __ variant is needed
anymore) and simplifies the lock handling during bootstrap.
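To illustrate the pattern with add_partial() (a simplified sketch of the
before/after shape, not the full functions from the diff below):
previously the list helpers took n->list_lock themselves, whereas after
this patch they only manipulate the list and the caller takes the lock
explicitly around the call.

	/* Before: lock hidden inside the helper. */
	static void add_partial(struct kmem_cache_node *n,
					struct page *page, int tail)
	{
		spin_lock(&n->list_lock);
		n->nr_partial++;
		if (tail)
			list_add_tail(&page->lru, &n->partial);
		else
			list_add(&page->lru, &n->partial);
		spin_unlock(&n->list_lock);
	}

	/* After: helper assumes the caller holds n->list_lock. */
	static inline void add_partial(struct kmem_cache_node *n,
					struct page *page, int tail)
	{
		n->nr_partial++;
		if (tail)
			list_add_tail(&page->lru, &n->partial);
		else
			list_add(&page->lru, &n->partial);
	}

	/* Caller (e.g. unfreeze_slab()) now takes the lock explicitly: */
	spin_lock(&n->list_lock);
	add_partial(n, page, tail);
	spin_unlock(&n->list_lock);
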
Signed-off-by: Christoph Lameter <cl@linux.com>
---
mm/slub.c | 74 ++++++++++++++++++++++++++++++--------------------------------
1 file changed, 36 insertions(+), 38 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-05-16 11:46:51.181463060 -0500
+++ linux-2.6/mm/slub.c 2011-05-16 11:46:58.311463052 -0500
@@ -917,25 +917,21 @@ static inline void slab_free_hook(struct
/*
* Tracking of fully allocated slabs for debugging purposes.
*/
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
{
- spin_lock(&n->list_lock);
+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
list_add(&page->lru, &n->full);
- spin_unlock(&n->list_lock);
}
static void remove_full(struct kmem_cache *s, struct page *page)
{
- struct kmem_cache_node *n;
-
if (!(s->flags & SLAB_STORE_USER))
return;
- n = get_node(s, page_to_nid(page));
-
- spin_lock(&n->list_lock);
list_del(&page->lru);
- spin_unlock(&n->list_lock);
}
/* Tracking of the number of slabs for debugging purposes */
@@ -1060,8 +1056,13 @@ static noinline int free_debug_processin
}
/* Special debug activities for freeing objects */
- if (!page->frozen && !page->freelist)
+ if (!page->frozen && !page->freelist) {
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+ spin_lock(&n->list_lock);
remove_full(s, page);
+ spin_unlock(&n->list_lock);
+ }
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0);
@@ -1420,36 +1421,26 @@ static __always_inline int slab_trylock(
/*
* Management of partially allocated slabs
*/
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
- spin_lock(&n->list_lock);
n->nr_partial++;
if (tail)
list_add_tail(&page->lru, &n->partial);
else
list_add(&page->lru, &n->partial);
- spin_unlock(&n->list_lock);
}
-static inline void __remove_partial(struct kmem_cache_node *n,
+static inline void remove_partial(struct kmem_cache_node *n,
struct page *page)
{
list_del(&page->lru);
n->nr_partial--;
}
-static void remove_partial(struct kmem_cache *s, struct page *page)
-{
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
- spin_lock(&n->list_lock);
- __remove_partial(n, page);
- spin_unlock(&n->list_lock);
-}
-
/*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
*
* Must hold list_lock.
*/
@@ -1457,7 +1448,7 @@ static inline int lock_and_freeze_slab(s
struct page *page)
{
if (slab_trylock(page)) {
- __remove_partial(n, page);
+ remove_partial(n, page);
return 1;
}
return 0;
@@ -1574,12 +1565,17 @@ static void unfreeze_slab(struct kmem_ca
if (page->inuse) {
if (page->freelist) {
+ spin_lock(&n->list_lock);
add_partial(n, page, tail);
+ spin_unlock(&n->list_lock);
stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
} else {
stat(s, DEACTIVATE_FULL);
- if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
- add_full(n, page);
+ if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) {
+ spin_lock(&n->list_lock);
+ add_full(s, n, page);
+ spin_unlock(&n->list_lock);
+ }
}
slab_unlock(page);
} else {
@@ -1595,7 +1591,9 @@ static void unfreeze_slab(struct kmem_ca
* kmem_cache_shrink can reclaim any empty slabs from
* the partial list.
*/
+ spin_lock(&n->list_lock);
add_partial(n, page, 1);
+ spin_unlock(&n->list_lock);
slab_unlock(page);
} else {
slab_unlock(page);
@@ -2095,7 +2093,11 @@ static void __slab_free(struct kmem_cach
* then add it.
*/
if (unlikely(!prior)) {
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+ spin_lock(&n->list_lock);
add_partial(get_node(s, page_to_nid(page)), page, 1);
+ spin_unlock(&n->list_lock);
stat(s, FREE_ADD_PARTIAL);
}
@@ -2109,7 +2111,11 @@ slab_empty:
/*
* Slab still on the partial list.
*/
- remove_partial(s, page);
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+ spin_lock(&n->list_lock);
+ remove_partial(n, page);
+ spin_unlock(&n->list_lock);
stat(s, FREE_REMOVE_PARTIAL);
}
slab_unlock(page);
@@ -2391,7 +2397,6 @@ static void early_kmem_cache_node_alloc(
{
struct page *page;
struct kmem_cache_node *n;
- unsigned long flags;
BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
@@ -2418,14 +2423,7 @@ static void early_kmem_cache_node_alloc(
init_kmem_cache_node(n, kmem_cache_node);
inc_slabs_node(kmem_cache_node, node, page->objects);
- /*
- * lockdep requires consistent irq usage for each lock
- * so even though there cannot be a race this early in
- * the boot sequence, we still disable irqs.
- */
- local_irq_save(flags);
add_partial(n, page, 0);
- local_irq_restore(flags);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2709,7 +2707,7 @@ static void free_partial(struct kmem_cac
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- __remove_partial(n, page);
+ remove_partial(n, page);
discard_slab(s, page);
} else {
list_slab_objects(s, page,
@@ -3047,7 +3045,7 @@ int kmem_cache_shrink(struct kmem_cache
* may have freed the last object and be
* waiting to release the slab.
*/
- __remove_partial(n, page);
+ remove_partial(n, page);
slab_unlock(page);
discard_slab(s, page);
} else {
--