From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: Marco Elver <elver@google.com>,
Matthew Wilcox <willy@infradead.org>,
Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Andrew Morton <akpm@linux-foundation.org>,
Hyeonggon Yoo <42.hyeyoo@gmail.com>,
Roman Gushchin <roman.gushchin@linux.dev>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v2 01/23] mm/slab: move NUMA-related code to __do_cache_alloc()
Date: Thu, 14 Apr 2022 17:57:05 +0900
Message-ID: <20220414085727.643099-2-42.hyeyoo@gmail.com>
In-Reply-To: <20220414085727.643099-1-42.hyeyoo@gmail.com>
To implement slab_alloc_node() independently of the NUMA configuration,
move the NUMA fallback/alternate allocation code into __do_cache_alloc().

One functional change is that the node's availability is no longer
checked when allocating from the local node.
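For reference, the resulting node selection in __do_cache_alloc() can be
summarized as below (a simplified sketch condensed from the diff, not
the literal code):

    if (nodeid == NUMA_NO_NODE) {
            /* try mempolicy/cpuset alternate node, then local caches */
            nodeid = numa_mem_id();
    } else if (nodeid == numa_mem_id()) {
            /* explicit local-node request: allocate from local caches,
             * now without checking get_node() first */
    } else if (!get_node(cachep, nodeid)) {
            /* remote node not bootstrapped yet: fallback_alloc() */
    }
    /* on failure, ____cache_alloc_node() may fall back to other nodes */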
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
Changes from v1:
- Restore the path to alternate_node_alloc() when no node id is
  specified (removing it in v1 was a mistake.)
mm/slab.c | 68 +++++++++++++++++++++++++------------------------------
1 file changed, 31 insertions(+), 37 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e882657c1494..d854c24d5f5a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3187,13 +3187,14 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
return obj ? obj : fallback_alloc(cachep, flags);
}
+static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
+
static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
unsigned long caller)
{
unsigned long save_flags;
void *ptr;
- int slab_node = numa_mem_id();
struct obj_cgroup *objcg = NULL;
bool init = false;
@@ -3208,30 +3209,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
-
- if (nodeid == NUMA_NO_NODE)
- nodeid = slab_node;
-
- if (unlikely(!get_node(cachep, nodeid))) {
- /* Node not bootstrapped yet */
- ptr = fallback_alloc(cachep, flags);
- goto out;
- }
-
- if (nodeid == slab_node) {
- /*
- * Use the locally cached objects if possible.
- * However ____cache_alloc does not allow fallback
- * to other nodes. It may fail while we still have
- * objects on other nodes available.
- */
- ptr = ____cache_alloc(cachep, flags);
- if (ptr)
- goto out;
- }
- /* ___cache_alloc_node can fall back to other nodes */
- ptr = ____cache_alloc_node(cachep, flags, nodeid);
- out:
+ ptr = __do_cache_alloc(cachep, flags, nodeid);
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
init = slab_want_init_on_alloc(flags, cachep);
@@ -3242,31 +3220,46 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
}
static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *objp;
+ int slab_node = numa_mem_id();
- if (current->mempolicy || cpuset_do_slab_mem_spread()) {
- objp = alternate_node_alloc(cache, flags);
- if (objp)
- goto out;
+ if (nodeid == NUMA_NO_NODE) {
+ if (current->mempolicy || cpuset_do_slab_mem_spread()) {
+ objp = alternate_node_alloc(cachep, flags);
+ if (objp)
+ goto out;
+ }
+ /*
+ * Use the locally cached objects if possible.
+ * However ____cache_alloc does not allow fallback
+ * to other nodes. It may fail while we still have
+ * objects on other nodes available.
+ */
+ objp = ____cache_alloc(cachep, flags);
+ nodeid = slab_node;
+ } else if (nodeid == slab_node) {
+ objp = ____cache_alloc(cachep, flags);
+ } else if (!get_node(cachep, nodeid)) {
+ /* Node not bootstrapped yet */
+ objp = fallback_alloc(cachep, flags);
+ goto out;
}
- objp = ____cache_alloc(cache, flags);
/*
* We may just have run out of memory on the local node.
* ____cache_alloc_node() knows how to locate memory on other nodes
*/
if (!objp)
- objp = ____cache_alloc_node(cache, flags, numa_mem_id());
-
- out:
+ objp = ____cache_alloc_node(cachep, flags, nodeid);
+out:
return objp;
}
#else
static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
{
return ____cache_alloc(cachep, flags);
}
@@ -3293,7 +3286,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
- objp = __do_cache_alloc(cachep, flags);
+ objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);
@@ -3532,7 +3525,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
local_irq_disable();
for (i = 0; i < size; i++) {
- void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
+ void *objp = kfence_alloc(s, s->object_size, flags) ?:
+ __do_cache_alloc(s, flags, NUMA_NO_NODE);
if (unlikely(!objp))
goto error;
--
2.32.0