linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Dave Airlie <airlied@gmail.com>
To: dri-devel@lists.freedesktop.org, linux-mm@kvack.org,
	Johannes Weiner <hannes@cmpxchg.org>,
	Christian Koenig <christian.koenig@amd.com>
Cc: Dave Chinner <david@fromorbit.com>,
	Kairui Song <kasong@tencent.com>,
	Dave Airlie <airlied@redhat.com>
Subject: [PATCH 07/17] ttm/pool: track allocated_pages per numa node.
Date: Mon, 30 Jun 2025 14:49:26 +1000	[thread overview]
Message-ID: <20250630045005.1337339-8-airlied@gmail.com> (raw)
In-Reply-To: <20250630045005.1337339-1-airlied@gmail.com>

From: Dave Airlie <airlied@redhat.com>

This gets the memory sizes from the nodes and stores the limit
as 50% of those. I think eventually we should drop the limits
once we have memcg aware shrinking, but this should be more NUMA
friendly, and I think it is what people would prefer to
happen on NUMA aware systems.

Cc: Christian Koenig <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_pool.c | 57 +++++++++++++++++++++++++---------
 1 file changed, 43 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 66cd963b24dc..c6192c915f0d 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -115,10 +115,11 @@ struct ttm_pool_tt_restore {
 
 static unsigned long page_pool_size;
 
-MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool per NUMA node");
 module_param(page_pool_size, ulong, 0644);
 
-static atomic_long_t allocated_pages;
+static unsigned long pool_node_limit[MAX_NUMNODES];
+static atomic_long_t allocated_pages[MAX_NUMNODES];
 
 static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
 static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
@@ -304,8 +305,8 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 	rcu_read_lock();
 	list_lru_add(&pt->pages, &p->lru, nid, NULL);
 	rcu_read_unlock();
-	atomic_long_add(1 << pt->order, &allocated_pages);
 
+	atomic_long_add(num_pages, &allocated_pages[nid]);
 	mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, -num_pages);
 	mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, num_pages);
 }
@@ -331,7 +332,7 @@ static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, int nid)
 
 	ret = list_lru_walk_node(&pt->pages, nid, take_one_from_lru, (void *)&p, &nr_to_walk);
 	if (ret == 1 && p) {
-		atomic_long_sub(1 << pt->order, &allocated_pages);
+		atomic_long_sub(1 << pt->order, &allocated_pages[nid]);
 		mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, (1 << pt->order));
 		mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, -(1 << pt->order));
 	}
@@ -370,7 +371,7 @@ static void ttm_pool_dispose_list(struct ttm_pool_type *pt,
 		struct page *p;
 		p = list_first_entry(dispose, struct page, lru);
 		list_del_init(&p->lru);
-		atomic_long_sub(1 << pt->order, &allocated_pages);
+		atomic_long_sub(1 << pt->order, &allocated_pages[page_to_nid(p)]);
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
 	}
 }
@@ -928,11 +929,13 @@ int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
  */
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
+	int nid = ttm_pool_nid(pool);
+
 	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
-	while (atomic_long_read(&allocated_pages) > page_pool_size) {
-		unsigned long diff = page_pool_size - atomic_long_read(&allocated_pages);
-		ttm_pool_shrink(ttm_pool_nid(pool), diff);
+	while (atomic_long_read(&allocated_pages[nid]) > pool_node_limit[nid]) {
+		unsigned long diff = pool_node_limit[nid] - atomic_long_read(&allocated_pages[nid]);
+		ttm_pool_shrink(nid, diff);
 	}
 }
 EXPORT_SYMBOL(ttm_pool_free);
@@ -1192,7 +1195,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
 	do
 		num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan);
 	while (num_freed < sc->nr_to_scan &&
-	       atomic_long_read(&allocated_pages));
+	       atomic_long_read(&allocated_pages[sc->nid]));
 
 	sc->nr_scanned = num_freed;
 
@@ -1203,7 +1206,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
 static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
 					     struct shrink_control *sc)
 {
-	unsigned long num_pages = atomic_long_read(&allocated_pages);
+	unsigned long num_pages = atomic_long_read(&allocated_pages[sc->nid]);
 
 	return num_pages ? num_pages : SHRINK_EMPTY;
 }
@@ -1240,8 +1243,12 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 /* Dump the total amount of allocated pages */
 static void ttm_pool_debugfs_footer(struct seq_file *m)
 {
-	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
-		   atomic_long_read(&allocated_pages), page_pool_size);
+	int nid;
+
+	for_each_node(nid) {
+		seq_printf(m, "\ntotal node%d\t: %8lu of %8lu\n", nid,
+			   atomic_long_read(&allocated_pages[nid]), pool_node_limit[nid]);
+	}
 }
 
 /* Dump the information for the global pools */
@@ -1340,6 +1347,22 @@ DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
 
 #endif
 
+static inline uint64_t ttm_get_node_memory_size(int nid)
+{
+	/* This is directly using si_meminfo_node implementation as the
+	 * function is not exported.
+	 */
+	int zone_type;
+	uint64_t managed_pages = 0;
+
+	pg_data_t *pgdat = NODE_DATA(nid);
+
+	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+		managed_pages +=
+			zone_managed_pages(&pgdat->node_zones[zone_type]);
+	return managed_pages * PAGE_SIZE;
+}
+
 /**
  * ttm_pool_mgr_init - Initialize globals
  *
@@ -1351,8 +1374,14 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 {
 	unsigned int i;
 
-	if (!page_pool_size)
-		page_pool_size = num_pages;
+	int nid;
+	for_each_node(nid) {
+		if (!page_pool_size) {
+			uint64_t node_size = ttm_get_node_memory_size(nid);
+			pool_node_limit[nid] = (node_size >> PAGE_SHIFT) / 2;
+		} else
+			pool_node_limit[nid] = page_pool_size;
+	}
 
 	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
-- 
2.49.0



  parent reply	other threads:[~2025-06-30  4:51 UTC|newest]

Thread overview: 52+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-06-30  4:49 drm/ttm/memcg/lru: enable memcg tracking for ttm and amdgpu driver Dave Airlie
2025-06-30  4:49 ` [PATCH 01/17] mm: add gpu active/reclaim per-node stat counters (v2) Dave Airlie
2025-06-30  4:49 ` [PATCH 02/17] drm/ttm: use gpu mm stats to track gpu memory allocations. (v2) Dave Airlie
2025-06-30 10:04   ` Christian König
2025-07-01  1:41     ` David Airlie
2025-07-02 16:08   ` Shakeel Butt
2025-06-30  4:49 ` [PATCH 03/17] mm/list_lru: export list_lru_add Dave Airlie
2025-06-30  4:49 ` [PATCH 04/17] ttm/pool: port to list_lru. (v2) Dave Airlie
2025-06-30 10:37   ` kernel test robot
2025-06-30  4:49 ` [PATCH 05/17] ttm/pool: drop numa specific pools Dave Airlie
2025-06-30 10:12   ` Christian König
2025-06-30  4:49 ` [PATCH 06/17] ttm/pool: make pool shrinker NUMA aware Dave Airlie
2025-06-30 10:15   ` Christian König
2025-06-30 21:30     ` David Airlie
2025-06-30  4:49 ` Dave Airlie [this message]
2025-06-30  4:49 ` [PATCH 08/17] memcg: add support for GPU page counters Dave Airlie
2025-07-02 16:06   ` Shakeel Butt
2025-07-03  5:43     ` David Airlie
2025-06-30  4:49 ` [PATCH 09/17] memcg: export memcg_list_lru_alloc Dave Airlie
2025-06-30  4:49 ` [PATCH 10/17] ttm: add a memcg accounting flag to the alloc/populate APIs Dave Airlie
2025-06-30  9:56   ` kernel test robot
2025-06-30 10:20   ` Christian König
2025-07-01  1:46     ` David Airlie
2025-06-30  4:49 ` [PATCH 11/17] ttm/pool: initialise the shrinker earlier Dave Airlie
2025-06-30  4:49 ` [PATCH 12/17] ttm: add objcg pointer to bo and tt Dave Airlie
2025-06-30 10:24   ` Christian König
2025-06-30 21:33     ` David Airlie
2025-07-01  7:22       ` Christian König
2025-07-01  8:06         ` David Airlie
2025-07-01  8:15           ` Christian König
2025-07-01 22:11             ` David Airlie
2025-07-02  7:27               ` Christian König
2025-07-02  7:57                 ` David Airlie
2025-07-02  8:24                   ` Christian König
2025-07-03  5:53                     ` David Airlie
2025-06-30  4:49 ` [PATCH 13/17] ttm/pool: enable memcg tracking and shrinker Dave Airlie
2025-06-30 10:23   ` Christian König
2025-06-30 21:23     ` David Airlie
2025-06-30 11:59   ` kernel test robot
2025-07-02 16:41   ` Shakeel Butt
2025-06-30  4:49 ` [PATCH 14/17] ttm: hook up memcg placement flags Dave Airlie
2025-06-30  4:49 ` [PATCH 15/17] memcontrol: allow objcg api when memcg is config off Dave Airlie
2025-06-30  4:49 ` [PATCH 16/17] memcontrol: export current_obj_cgroup Dave Airlie
2025-06-30  4:49 ` [PATCH 17/17] amdgpu: add support for memory cgroups Dave Airlie
2025-07-02 16:02   ` Shakeel Butt
2025-07-03  2:53     ` David Airlie
2025-07-03 17:58       ` Shakeel Butt
2025-07-03 18:15         ` Christian König
2025-07-03 20:06           ` Shakeel Butt
2025-07-03 21:22             ` David Airlie
2025-07-04  9:39               ` Christian König
2025-07-01 23:26 ` drm/ttm/memcg/lru: enable memcg tracking for ttm and amdgpu driver Balbir Singh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250630045005.1337339-8-airlied@gmail.com \
    --to=airlied@gmail.com \
    --cc=airlied@redhat.com \
    --cc=christian.koenig@amd.com \
    --cc=david@fromorbit.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=hannes@cmpxchg.org \
    --cc=kasong@tencent.com \
    --cc=linux-mm@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).