From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
Andrew Morton <akpm@linux-foundation.org>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Christoph Lameter <cl@linux.com>,
Lai Jiangshan <laijs@cn.fujitsu.com>,
Pekka Enberg <penberg@kernel.org>,
Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH v3 07/14] slub: Remove page->counters
Date: Wed, 18 Apr 2018 11:49:05 -0700
Message-ID: <20180418184912.2851-8-willy@infradead.org>
In-Reply-To: <20180418184912.2851-1-willy@infradead.org>
From: Matthew Wilcox <mawilcox@microsoft.com>
Use page->private instead of page->counters, now that the two fields
share the same location in struct page. Include compile-time assertions
so the fields cannot silently drift out of sync.
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
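For illustration only: a minimal user-space sketch (a made-up stand-in
struct, not kernel code) of the layout invariant that the BUILD_BUG_ON()s
added below enforce. 'private' must occupy the word directly after
'freelist', and the pair must be double-word aligned so cmpxchg_double()
can operate on them. Builds as plain C11; nothing here is kernel API.

#include <stddef.h>

/* Simplified stand-in for the relevant part of struct page. */
struct fake_page {
	void *freelist;			/* first word of the cmpxchg_double pair */
	union {
		unsigned long private;	/* second word; what SLUB now writes */
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
} __attribute__((aligned(2 * sizeof(unsigned long))));

/* Same invariants as the BUILD_BUG_ON()s in set_page_slub_counters(): */
_Static_assert(offsetof(struct fake_page, freelist) + sizeof(void *) ==
	       offsetof(struct fake_page, private),
	       "private must directly follow freelist");
_Static_assert(offsetof(struct fake_page, freelist) % (2 * sizeof(void *)) == 0,
	       "the pair must be double-word aligned");

int main(void) { return 0; }
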
include/linux/mm_types.h | 5 ++-
mm/slub.c | 68 ++++++++++++++++++----------------------
2 files changed, 33 insertions(+), 40 deletions(-)
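
Also for illustration, and again not kernel code: a sketch of the
read/modify/compare-exchange retry pattern that the freelist/counters
updates below follow, written with C11 atomics rather than the kernel's
cmpxchg_double(). All names are made up; depending on the toolchain the
16-byte atomic may need linking with -latomic.

#include <stdatomic.h>
#include <stdio.h>

/* Two-word pair updated together, mirroring (freelist, counters). */
struct pair {
	void *freelist;
	unsigned long counters;
};

static _Atomic struct pair slot;

/* Retry until no other thread changed either word in the meantime. */
static void update_pair(void *new_freelist, unsigned long new_counters)
{
	struct pair old = atomic_load(&slot);
	struct pair new;

	do {
		new.freelist = new_freelist;
		new.counters = new_counters;
		/* On failure, 'old' is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(&slot, &old, new));
}

int main(void)
{
	update_pair(NULL, 42);
	struct pair cur = atomic_load(&slot);
	printf("counters = %lu\n", cur.counters);
	return 0;
}
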
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9c048a512695..04d9dc442029 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -65,9 +65,9 @@ struct hmm;
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
-#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#else
#define _struct_page_alignment
-#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#endif
struct page {
/* First double word block */
@@ -105,7 +105,6 @@ struct page {
#endif
#endif
void *s_mem; /* slab first object */
- unsigned long counters; /* SLUB */
struct { /* SLUB */
unsigned inuse:16;
unsigned objects:15;
diff --git a/mm/slub.c b/mm/slub.c
index 27b6ba1c116a..f2f64568b25e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -55,8 +55,9 @@
* have the ability to do a cmpxchg_double. It only protects the second
* double word in the page struct. Meaning
* A. page->freelist -> List of object free in a page
- * B. page->counters -> Counters of objects
- * C. page->frozen -> frozen state
+ * B. page->inuse -> Number of objects in use
+ * C. page->objects -> Number of objects in page
+ * D. page->frozen -> frozen state
*
* If a slab is frozen then it is exempt from list management. It is not
* on any list. The processor that froze the slab is the one who can
@@ -358,17 +359,10 @@ static __always_inline void slab_unlock(struct page *page)
static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
- struct page tmp;
- tmp.counters = counters_new;
- /*
- * page->counters can cover frozen/inuse/objects as well
- * as page->_refcount. If we assign to ->counters directly
- * we run the risk of losing updates to page->_refcount, so
- * be careful and only assign to the fields we need.
- */
- page->frozen = tmp.frozen;
- page->inuse = tmp.inuse;
- page->objects = tmp.objects;
+ BUILD_BUG_ON(offsetof(struct page, freelist) + sizeof(void *) !=
+ offsetof(struct page, private));
+ BUILD_BUG_ON(offsetof(struct page, freelist) % (2 * sizeof(void *)));
+ page->private = counters_new;
}
/* Interrupts must be disabled (for the fallback code to work right) */
@@ -381,7 +375,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (s->flags & __CMPXCHG_DOUBLE) {
- if (cmpxchg_double(&page->freelist, &page->counters,
+ if (cmpxchg_double(&page->freelist, &page->private,
freelist_old, counters_old,
freelist_new, counters_new))
return true;
@@ -390,7 +384,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
{
slab_lock(page);
if (page->freelist == freelist_old &&
- page->counters == counters_old) {
+ page->private == counters_old) {
page->freelist = freelist_new;
set_page_slub_counters(page, counters_new);
slab_unlock(page);
@@ -417,7 +411,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (s->flags & __CMPXCHG_DOUBLE) {
- if (cmpxchg_double(&page->freelist, &page->counters,
+ if (cmpxchg_double(&page->freelist, &page->private,
freelist_old, counters_old,
freelist_new, counters_new))
return true;
@@ -429,7 +423,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
local_irq_save(flags);
slab_lock(page);
if (page->freelist == freelist_old &&
- page->counters == counters_old) {
+ page->private == counters_old) {
page->freelist = freelist_new;
set_page_slub_counters(page, counters_new);
slab_unlock(page);
@@ -1788,8 +1782,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
* per cpu allocation list.
*/
freelist = page->freelist;
- counters = page->counters;
- new.counters = counters;
+ counters = page->private;
+ new.private = counters;
*objects = new.objects - new.inuse;
if (mode) {
new.inuse = page->objects;
@@ -1803,7 +1797,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
if (!__cmpxchg_double_slab(s, page,
freelist, counters,
- new.freelist, new.counters,
+ new.freelist, new.private,
"acquire_slab"))
return NULL;
@@ -2050,15 +2044,15 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
do {
prior = page->freelist;
- counters = page->counters;
+ counters = page->private;
set_freepointer(s, freelist, prior);
- new.counters = counters;
+ new.private = counters;
new.inuse--;
VM_BUG_ON(!new.frozen);
} while (!__cmpxchg_double_slab(s, page,
prior, counters,
- freelist, new.counters,
+ freelist, new.private,
"drain percpu freelist"));
freelist = nextfree;
@@ -2081,11 +2075,11 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
redo:
old.freelist = page->freelist;
- old.counters = page->counters;
+ old.private = page->private;
VM_BUG_ON(!old.frozen);
/* Determine target state of the slab */
- new.counters = old.counters;
+ new.private = old.private;
if (freelist) {
new.inuse--;
set_freepointer(s, freelist, old.freelist);
@@ -2146,8 +2140,8 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
l = m;
if (!__cmpxchg_double_slab(s, page,
- old.freelist, old.counters,
- new.freelist, new.counters,
+ old.freelist, old.private,
+ new.freelist, new.private,
"unfreezing slab"))
goto redo;
@@ -2196,17 +2190,17 @@ static void unfreeze_partials(struct kmem_cache *s,
do {
old.freelist = page->freelist;
- old.counters = page->counters;
+ old.private = page->private;
VM_BUG_ON(!old.frozen);
- new.counters = old.counters;
+ new.private = old.private;
new.freelist = old.freelist;
new.frozen = 0;
} while (!__cmpxchg_double_slab(s, page,
- old.freelist, old.counters,
- new.freelist, new.counters,
+ old.freelist, old.private,
+ new.freelist, new.private,
"unfreezing slab"));
if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
@@ -2495,9 +2489,9 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
do {
freelist = page->freelist;
- counters = page->counters;
+ counters = page->private;
- new.counters = counters;
+ new.private = counters;
VM_BUG_ON(!new.frozen);
new.inuse = page->objects;
@@ -2505,7 +2499,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
} while (!__cmpxchg_double_slab(s, page,
freelist, counters,
- NULL, new.counters,
+ NULL, new.private,
"get_freelist"));
return freelist;
@@ -2830,9 +2824,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
n = NULL;
}
prior = page->freelist;
- counters = page->counters;
+ counters = page->private;
set_freepointer(s, tail, prior);
- new.counters = counters;
+ new.private = counters;
was_frozen = new.frozen;
new.inuse -= cnt;
if ((!new.inuse || !prior) && !was_frozen) {
@@ -2865,7 +2859,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
} while (!cmpxchg_double_slab(s, page,
prior, counters,
- head, new.counters,
+ head, new.private,
"__slab_free"));
if (likely(!n)) {
--
2.17.0