Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/ttm/pool: Revert to clear-on-alloc to honor TTM_TT_FLAG_ZERO_ALLOC
@ 2024-06-20 16:01 Nirmoy Das
  2024-06-20 17:08 ` ✓ CI.Patch_applied: success for " Patchwork
                   ` (8 more replies)
  0 siblings, 9 replies; 12+ messages in thread
From: Nirmoy Das @ 2024-06-20 16:01 UTC (permalink / raw)
  To: dri-devel
  Cc: intel-xe, Nirmoy Das, Christian Koenig, Thomas Hellström,
	Matthew Auld

Currently the TTM pool is not honoring the TTM_TT_FLAG_ZERO_ALLOC flag
and is clearing pages on free instead. This does help with allocation
latency, but the clearing happens even if the DRM driver doesn't pass
the flag. If clear-on-free is needed, a new flag can be added for that
purpose.

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
---
 drivers/gpu/drm/ttm/ttm_pool.c | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 6e1fd6985ffc..cbbd722185ee 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -224,15 +224,6 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
 /* Give pages into a specific pool_type */
 static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 {
-	unsigned int i, num_pages = 1 << pt->order;
-
-	for (i = 0; i < num_pages; ++i) {
-		if (PageHighMem(p))
-			clear_highpage(p + i);
-		else
-			clear_page(page_address(p + i));
-	}
-
 	spin_lock(&pt->lock);
 	list_add(&p->lru, &pt->pages);
 	spin_unlock(&pt->lock);
@@ -240,15 +231,26 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 }
 
 /* Take pages from a specific pool_type, return NULL when nothing available */
-static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, bool clear)
 {
 	struct page *p;
 
 	spin_lock(&pt->lock);
 	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
 	if (p) {
+		unsigned int i, num_pages = 1 << pt->order;
+
 		atomic_long_sub(1 << pt->order, &allocated_pages);
 		list_del(&p->lru);
+		if (clear) {
+			for (i = 0; i < num_pages; ++i) {
+				if (PageHighMem(p))
+					clear_highpage(p + i);
+				else
+					clear_page(page_address(p + i));
+			}
+		}
+
 	}
 	spin_unlock(&pt->lock);
 
@@ -279,7 +281,7 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 	list_del(&pt->shrinker_list);
 	spin_unlock(&shrinker_lock);
 
-	while ((p = ttm_pool_type_take(pt)))
+	while ((p = ttm_pool_type_take(pt, false)))
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
 }
 
@@ -330,7 +332,7 @@ static unsigned int ttm_pool_shrink(void)
 	list_move_tail(&pt->shrinker_list, &shrinker_list);
 	spin_unlock(&shrinker_lock);
 
-	p = ttm_pool_type_take(pt);
+	p = ttm_pool_type_take(pt, false);
 	if (p) {
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
 		num_pages = 1 << pt->order;
@@ -457,10 +459,11 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	     num_pages;
 	     order = min_t(unsigned int, order, __fls(num_pages))) {
 		struct ttm_pool_type *pt;
+		bool clear = tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC;
 
 		page_caching = tt->caching;
 		pt = ttm_pool_select_type(pool, tt->caching, order);
-		p = pt ? ttm_pool_type_take(pt) : NULL;
+		p = pt ? ttm_pool_type_take(pt, clear) : NULL;
 		if (p) {
 			r = ttm_pool_apply_caching(caching, pages,
 						   tt->caching);
@@ -480,7 +483,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 				if (num_pages < (1 << order))
 					break;
 
-				p = ttm_pool_type_take(pt);
+				p = ttm_pool_type_take(pt, clear);
 			} while (p);
 		}
 
-- 
2.42.0


^ permalink raw reply related	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2024-06-24  8:41 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-06-20 16:01 [PATCH] drm/ttm/pool: Revert to clear-on-alloc to honor TTM_TT_FLAG_ZERO_ALLOC Nirmoy Das
2024-06-20 17:08 ` ✓ CI.Patch_applied: success for " Patchwork
2024-06-20 17:08 ` ✓ CI.checkpatch: " Patchwork
2024-06-20 17:10 ` ✓ CI.KUnit: " Patchwork
2024-06-20 17:24 ` ✓ CI.Build: " Patchwork
2024-06-20 17:27 ` ✗ CI.Hooks: failure " Patchwork
2024-06-20 17:28 ` ✗ CI.checksparse: warning " Patchwork
2024-06-20 18:02 ` ✓ CI.BAT: success " Patchwork
2024-06-20 20:47 ` ✗ CI.FULL: failure " Patchwork
2024-06-21 14:54 ` [PATCH] " Christian König
2024-06-21 15:43   ` Nirmoy Das
2024-06-24  8:41     ` Christian König

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox