From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Zhen Lei <thunder.leizhen@huawei.com>, Waiman Long <longman@redhat.com>
Subject: [patch 15/25] debugobjects: Rework object allocation
Date: Mon, 7 Oct 2024 18:50:09 +0200 (CEST) [thread overview]
Message-ID: <20241007164913.893554162@linutronix.de> (raw)
In-Reply-To: 20241007163507.647617031@linutronix.de
The current allocation scheme tries to allocate from the per CPU pool
first. If that fails it allocates one object from the global pool and then
refills the per CPU pool from the global pool.
That is in the way of switching the pool management to batch mode as the
global pool needs to be a strict stack of batches, which does not allow
single objects to be allocated.
Rework the code to refill the per CPU pool first and then allocate the
object from the refilled batch. Also try to allocate from the to-free pool
first to avoid freeing and reallocating objects.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/debugobjects.c | 144 +++++++++++++++++++++++++----------------------------
1 file changed, 69 insertions(+), 75 deletions(-)
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -141,6 +141,64 @@ static __always_inline bool pool_must_re
return pool_count(pool) < pool->min_cnt / 2;
}
+static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
+{
+ if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
+ return false;
+
+ for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
+ struct hlist_node *node = src->objects.first;
+
+ WRITE_ONCE(src->cnt, src->cnt - 1);
+ WRITE_ONCE(dst->cnt, dst->cnt + 1);
+
+ hlist_del(node);
+ hlist_add_head(node, &dst->objects);
+ }
+ return true;
+}
+
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+ struct debug_obj *obj;
+
+ if (unlikely(!list->first))
+ return NULL;
+
+ obj = hlist_entry(list->first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ return obj;
+}
+
+static struct debug_obj *pcpu_alloc(void)
+{
+ struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+
+ lockdep_assert_irqs_disabled();
+
+ for (;;) {
+ struct debug_obj *obj = __alloc_object(&pcp->objects);
+
+ if (likely(obj)) {
+ pcp->cnt--;
+ return obj;
+ }
+
+ guard(raw_spinlock)(&pool_lock);
+ if (!pool_move_batch(pcp, &pool_to_free)) {
+ if (!pool_move_batch(pcp, &pool_global))
+ return NULL;
+ }
+ obj_pool_used += pcp->cnt;
+
+ if (obj_pool_used > obj_pool_max_used)
+ obj_pool_max_used = obj_pool_used;
+
+ if (pool_global.cnt < obj_pool_min_free)
+ obj_pool_min_free = pool_global.cnt;
+ }
+}
+
static void free_object_list(struct hlist_head *head)
{
struct hlist_node *tmp;
@@ -158,7 +216,6 @@ static void free_object_list(struct hlis
static void fill_pool_from_freelist(void)
{
static unsigned long state;
- struct debug_obj *obj;
/*
* Reuse objs from the global obj_to_free list; they will be
@@ -180,17 +237,11 @@ static void fill_pool_from_freelist(void
if (test_bit(0, &state) || test_and_set_bit(0, &state))
return;
- guard(raw_spinlock)(&pool_lock);
- /*
- * Recheck with the lock held as the worker thread might have
- * won the race and freed the global free list already.
- */
- while (pool_to_free.cnt && (pool_global.cnt < pool_global.min_cnt)) {
- obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
- hlist_del(&obj->node);
- WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
- hlist_add_head(&obj->node, &pool_global.objects);
- WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
+ /* Avoid taking the lock when there is no work to do */
+ while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
+ guard(raw_spinlock)(&pool_lock);
+ /* Move a batch if possible */
+ pool_move_batch(&pool_global, &pool_to_free);
}
clear_bit(0, &state);
}
@@ -251,74 +302,17 @@ static struct debug_obj *lookup_object(v
return NULL;
}
-/*
- * Allocate a new object from the hlist
- */
-static struct debug_obj *__alloc_object(struct hlist_head *list)
+static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
+ const struct debug_obj_descr *descr)
{
- struct debug_obj *obj = NULL;
-
- if (list->first) {
- obj = hlist_entry(list->first, typeof(*obj), node);
- hlist_del(&obj->node);
- }
-
- return obj;
-}
-
-static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
-{
- struct obj_pool *percpu_pool = this_cpu_ptr(&pool_pcpu);
struct debug_obj *obj;
- if (likely(obj_cache)) {
- obj = __alloc_object(&percpu_pool->objects);
- if (obj) {
- percpu_pool->cnt--;
- goto init_obj;
- }
- } else {
+ if (likely(obj_cache))
+ obj = pcpu_alloc();
+ else
obj = __alloc_object(&pool_boot);
- goto init_obj;
- }
-
- raw_spin_lock(&pool_lock);
- obj = __alloc_object(&pool_global.objects);
- if (obj) {
- obj_pool_used++;
- WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
-
- /*
- * Looking ahead, allocate one batch of debug objects and
- * put them into the percpu free pool.
- */
- if (likely(obj_cache)) {
- int i;
-
- for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
- struct debug_obj *obj2;
-
- obj2 = __alloc_object(&pool_global.objects);
- if (!obj2)
- break;
- hlist_add_head(&obj2->node, &percpu_pool->objects);
- percpu_pool->cnt++;
- obj_pool_used++;
- WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
- }
- }
-
- if (obj_pool_used > obj_pool_max_used)
- obj_pool_max_used = obj_pool_used;
-
- if (pool_global.cnt < obj_pool_min_free)
- obj_pool_min_free = pool_global.cnt;
- }
- raw_spin_unlock(&pool_lock);
-init_obj:
- if (obj) {
+ if (likely(obj)) {
obj->object = addr;
obj->descr = descr;
obj->state = ODEBUG_STATE_NONE;
next prev parent reply other threads:[~2024-10-07 16:50 UTC|newest]
Thread overview: 81+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-10-07 16:49 [patch 00/25] debugobjects: Rework object handling Thomas Gleixner
2024-10-07 16:49 ` [patch 01/25] debugobjects: Delete a piece of redundant code Thomas Gleixner
2024-10-07 16:49 ` [patch 02/25] debugobjects: Collect newly allocated objects in a list to reduce lock contention Thomas Gleixner
2024-10-07 16:49 ` [patch 03/25] debugobjects: Dont destroy kmem cache in init() Thomas Gleixner
2024-10-10 2:14 ` Leizhen (ThunderTown)
2024-10-10 11:46 ` Thomas Gleixner
2024-10-10 13:31 ` Leizhen (ThunderTown)
2024-10-11 20:37 ` Thomas Gleixner
2024-10-12 1:50 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:49 ` [patch 04/25] debugobjects: Remove pointless hlist initialization Thomas Gleixner
2024-10-10 2:19 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:49 ` [patch 05/25] debugobjects: Dont free objects directly on CPU hotplug Thomas Gleixner
2024-10-10 2:33 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:49 ` [patch 06/25] debugobjects: Reuse put_objects() on OOM Thomas Gleixner
2024-10-10 2:38 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:49 ` [patch 07/25] debugobjects: Remove pointless debug printk Thomas Gleixner
2024-10-10 2:44 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 08/25] debugobjects: Provide and use free_object_list() Thomas Gleixner
2024-10-10 2:54 ` Leizhen (ThunderTown)
2024-10-11 20:40 ` Thomas Gleixner
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 09/25] debugobjects: Make debug_objects_enabled bool Thomas Gleixner
2024-10-10 3:00 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 10/25] debugobjects: Reduce parallel pool fill attempts Thomas Gleixner
2024-10-07 16:50 ` [patch 11/25] debugobjects: Move pools into a datastructure Thomas Gleixner
2024-10-10 3:47 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 12/25] debugobjects: Use separate list head for boot pool Thomas Gleixner
2024-10-10 4:04 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 13/25] debugobjects: Rename and tidy up per CPU pools Thomas Gleixner
2024-10-10 6:23 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 14/25] debugobjects: Move min/max count into pool struct Thomas Gleixner
2024-10-10 6:26 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` Thomas Gleixner [this message]
2024-10-10 6:39 ` [patch 15/25] debugobjects: Rework object allocation Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 16/25] debugobjects: Rework object freeing Thomas Gleixner
2024-10-10 7:39 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 17/25] debugobjects: Rework free_object_work() Thomas Gleixner
2024-10-10 8:10 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 18/25] debugobjects: Use static key for boot pool selection Thomas Gleixner
2024-10-10 8:12 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 19/25] debugobjects: Prepare for batching Thomas Gleixner
2024-10-10 8:15 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 20/25] debugobjects: Prepare kmem_cache allocations " Thomas Gleixner
2024-10-10 8:40 ` Leizhen (ThunderTown)
2024-10-11 20:47 ` Thomas Gleixner
2024-10-12 2:02 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 21/25] debugobjects: Implement batch processing Thomas Gleixner
2024-10-10 9:39 ` Leizhen (ThunderTown)
2024-10-11 20:48 ` Thomas Gleixner
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 22/25] debugobjects: Move pool statistics into global_pool struct Thomas Gleixner
2024-10-10 9:50 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 23/25] debugobjects: Double the per CPU slots Thomas Gleixner
2024-10-10 9:51 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 24/25] debugobjects: Refill per CPU pool more agressively Thomas Gleixner
2024-10-10 10:02 ` Leizhen (ThunderTown)
2024-10-11 20:49 ` Thomas Gleixner
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
2024-10-07 16:50 ` [patch 25/25] debugobjects: Track object usage to avoid premature freeing of objects Thomas Gleixner
2024-10-10 13:13 ` Leizhen (ThunderTown)
2024-10-13 18:45 ` Thomas Gleixner
2024-10-14 1:46 ` Leizhen (ThunderTown)
2024-10-15 15:36 ` [tip: core/debugobjects] " tip-bot2 for Thomas Gleixner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241007164913.893554162@linutronix.de \
--to=tglx@linutronix.de \
--cc=linux-kernel@vger.kernel.org \
--cc=longman@redhat.com \
--cc=thunder.leizhen@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox