From: Dave Airlie
To: dri-devel@lists.freedesktop.org, tj@kernel.org, christian.koenig@amd.com,
	Johannes Weiner, Michal Hocko, Roman Gushchin, Shakeel Butt, Muchun Song
Cc: cgroups@vger.kernel.org, Dave Chinner, Waiman Long, simona@ffwll.ch
Subject: [PATCH 05/16] ttm/pool: make pool shrinker NUMA aware
Date: Thu, 16 Oct 2025 12:31:33 +1000
Message-ID: <20251016023205.2303108-6-airlied@gmail.com>
In-Reply-To: <20251016023205.2303108-1-airlied@gmail.com>
References: <20251016023205.2303108-1-airlied@gmail.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Dave Airlie

This enables NUMA awareness for the shrinker on the ttm pools.
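
For illustration, this is the pattern a NUMA-aware shrinker follows: once
registered with SHRINKER_NUMA_AWARE, the core MM invokes the count/scan
callbacks once per NUMA node with the target node in sc->nid, so each call
only accounts for and reclaims that node's objects. A minimal sketch of
that pattern (not part of this patch; the example_* names and the per-node
counter are hypothetical stand-ins for the pool's real list_lru state):

/*
 * Minimal sketch of a NUMA-aware shrinker, for illustration only.
 * The example_* names and the per-node counter are hypothetical;
 * the real pool tracks pages on a list_lru and frees actual pages.
 */
#include <linux/shrinker.h>
#include <linux/nodemask.h>
#include <linux/atomic.h>
#include <linux/minmax.h>

static atomic_long_t example_nr_cached[MAX_NUMNODES];
static struct shrinker *example_shrinker;

static unsigned long example_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	/* With SHRINKER_NUMA_AWARE set, sc->nid names a single node. */
	return atomic_long_read(&example_nr_cached[sc->nid]);
}

static unsigned long example_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	unsigned long have = atomic_long_read(&example_nr_cached[sc->nid]);
	unsigned long freed = min(have, sc->nr_to_scan);

	/* A real implementation would free 'freed' objects from node sc->nid here. */
	atomic_long_sub(freed, &example_nr_cached[sc->nid]);

	return freed ? freed : SHRINK_STOP;
}

static int example_shrinker_init(void)
{
	example_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "example-cache");
	if (!example_shrinker)
		return -ENOMEM;

	example_shrinker->count_objects = example_count;
	example_shrinker->scan_objects = example_scan;
	shrinker_register(example_shrinker);

	return 0;
}

Without SHRINKER_NUMA_AWARE the core MM only ever calls a shrinker with
nid 0; with it, kswapd and direct reclaim can target exactly the node
that is under memory pressure.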

Cc: Christian Koenig
Cc: Dave Chinner
Reviewed-by: Christian König
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/ttm/ttm_pool.c | 38 +++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 17 deletions(-)
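
The new per-node ttm_pool_shrink() below relies on the list_lru isolate
callback and dispose helper added earlier in this series. Roughly (sketch
only; the exact signatures and bodies may differ from the earlier patches),
the callback moves each entry onto a private dispose list and the helper
then frees those pages outside the list_lru lock:

/*
 * Sketch only -- these helpers live in ttm_pool.c and are introduced
 * earlier in the series; shown here just to make the hunks below easier
 * to follow.
 */
static enum lru_status pool_move_to_dispose_list(struct list_head *item,
						 struct list_lru_one *list,
						 void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/* Pull the page off the per-node LRU and park it on the private list. */
	list_lru_isolate_move(list, item, dispose);

	return LRU_REMOVED;
}

static void ttm_pool_dispose_list(struct ttm_pool_type *pt,
				  struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct page *p;

		p = list_first_entry(dispose, struct page, lru);
		list_del_init(&p->lru);

		/* Same free path the old single-page shrink used. */
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
	}
}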

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 02c19395080c..ae54f01f240b 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -413,12 +413,12 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
 	return NULL;
 }
 
-/* Free pages using the global shrinker list */
-static unsigned int ttm_pool_shrink(void)
+/* Free pages using the per-node shrinker list */
+static unsigned int ttm_pool_shrink(int nid, unsigned long num_to_free)
 {
+	LIST_HEAD(dispose);
 	struct ttm_pool_type *pt;
 	unsigned int num_pages;
-	struct page *p;
 
 	down_read(&pool_shrink_rwsem);
 	spin_lock(&shrinker_lock);
@@ -426,13 +426,10 @@ static unsigned int ttm_pool_shrink(void)
 	list_move_tail(&pt->shrinker_list, &shrinker_list);
 	spin_unlock(&shrinker_lock);
 
-	p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
-	if (p) {
-		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
-		num_pages = 1 << pt->order;
-	} else {
-		num_pages = 0;
-	}
+	num_pages = list_lru_walk_node(&pt->pages, nid, pool_move_to_dispose_list, &dispose, &num_to_free);
+	num_pages *= 1 << pt->order;
+
+	ttm_pool_dispose_list(pt, &dispose);
 	up_read(&pool_shrink_rwsem);
 
 	return num_pages;
@@ -781,6 +778,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 		pt = ttm_pool_select_type(pool, page_caching, order);
 		if (pt && allow_pools)
 			p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
+
 		/*
 		 * If that fails or previously failed, allocate from system.
 		 * Note that this also disallows additional pool allocations using
@@ -929,8 +927,10 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
 	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
-	while (atomic_long_read(&allocated_pages) > page_pool_size)
-		ttm_pool_shrink();
+	while (atomic_long_read(&allocated_pages) > page_pool_size) {
+		unsigned long diff = page_pool_size - atomic_long_read(&allocated_pages);
+		ttm_pool_shrink(ttm_pool_nid(pool), diff);
+	}
 }
 EXPORT_SYMBOL(ttm_pool_free);
 
@@ -1187,7 +1187,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
 	unsigned long num_freed = 0;
 
 	do
-		num_freed += ttm_pool_shrink();
+		num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan);
 	while (num_freed < sc->nr_to_scan &&
 	       atomic_long_read(&allocated_pages));
 
@@ -1315,11 +1315,15 @@ static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
 		.nr_to_scan = TTM_SHRINKER_BATCH,
 	};
 	unsigned long count;
+	int nid;
 
 	fs_reclaim_acquire(GFP_KERNEL);
-	count = ttm_pool_shrinker_count(mm_shrinker, &sc);
-	seq_printf(m, "%lu/%lu\n", count,
-		   ttm_pool_shrinker_scan(mm_shrinker, &sc));
+	for_each_node(nid) {
+		sc.nid = nid;
+		count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+		seq_printf(m, "%d: %lu/%lu\n", nid, count,
+			   ttm_pool_shrinker_scan(mm_shrinker, &sc));
+	}
 	fs_reclaim_release(GFP_KERNEL);
 
 	return 0;
@@ -1367,7 +1371,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 #endif
 #endif
 
-	mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
+	mm_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "drm-ttm_pool");
 	if (!mm_shrinker)
 		return -ENOMEM;
 
-- 
2.51.0