Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Brian Nguyen <brian3.nguyen@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: tejas.upadhyay@intel.com, matthew.brost@intel.com,
	shuicheng.lin@intel.com, stuart.summers@intel.com,
	Brian Nguyen <brian3.nguyen@intel.com>
Subject: [PATCH 08/11] drm/xe: Prep page reclaim in tlb inval job
Date: Tue, 18 Nov 2025 17:05:49 +0800	[thread overview]
Message-ID: <20251118090552.246243-9-brian3.nguyen@intel.com> (raw)
In-Reply-To: <20251118090552.246243-1-brian3.nguyen@intel.com>

Use the page reclaim list as an indicator of whether a page reclaim
action is desired, and pass it to the TLB invalidation fence to handle.

The job will need to maintain its own embedded copy of the PRL to
ensure its lifetime extends until the job has run.

Signed-off-by: Brian Nguyen <brian3.nguyen@intel.com>
---
 drivers/gpu/drm/xe/xe_pt.c            |  6 ++++++
 drivers/gpu/drm/xe/xe_tlb_inval.c     | 15 ++++++++++++++
 drivers/gpu/drm/xe/xe_tlb_inval.h     |  3 +++
 drivers/gpu/drm/xe/xe_tlb_inval_job.c | 29 +++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_tlb_inval_job.h |  4 ++++
 5 files changed, 57 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 532a047676d4..03723c8d2601 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2497,6 +2497,12 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 			goto kill_vm_tile1;
 		}
 		update.ijob = ijob;
+		if (pt_update_ops->prl.num_entries != XE_PAGE_RECLAIM_INVALID_LIST) {
+			xe_tlb_inval_job_add_page_reclaim(ijob, &pt_update_ops->prl);
+			/* Release ref from alloc, job will now handle it */
+			xe_page_reclaim_entries_put(pt_update_ops->prl.entries);
+			pt_update_ops->prl.entries = NULL;
+		}
 
 		if (tile->media_gt) {
 			dep_scheduler = to_dep_scheduler(q, tile->media_gt);
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c
index 67a047521165..18d49e017828 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval.c
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.c
@@ -476,3 +476,18 @@ void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
 	fence->reclaim_entries = NULL;
 	fence->prl_sa = NULL;
 }
+
+/**
+ * xe_tlb_inval_fence_add_page_reclaim() - Attach PRL state to a TLB fence
+ * @fence: Fence issued for the invalidate
+ * @prl: Page reclaim list describing pages to reclaim
+ *
+ * Copies the PRL pointer into the fence and disables PPC flushing so the
+ * reclamation message can be sent instead.
+ */
+void xe_tlb_inval_fence_add_page_reclaim(struct xe_tlb_inval_fence *fence,
+					 struct xe_page_reclaim_list *prl)
+{
+	fence->reclaim_entries = prl->entries;
+	fence->flush_cache = false;
+}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.h b/drivers/gpu/drm/xe/xe_tlb_inval.h
index b84ce3e6f294..a1cd9afe2ca7 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval.h
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.h
@@ -13,6 +13,7 @@
 struct xe_gt;
 struct xe_guc;
 struct xe_vm;
+struct xe_page_reclaim_list;
 
 int xe_gt_tlb_inval_init_early(struct xe_gt *gt);
 
@@ -30,6 +31,8 @@ void xe_tlb_inval_fence_flush_cache(struct xe_tlb_inval_fence *fence,
 void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
 			     struct xe_tlb_inval_fence *fence,
 			     bool stack);
+void xe_tlb_inval_fence_add_page_reclaim(struct xe_tlb_inval_fence *fence,
+					 struct xe_page_reclaim_list *prl);
 
 /**
  * xe_tlb_inval_fence_wait() - TLB invalidiation fence wait
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.c b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
index 6248f90323a9..5206a751c3d3 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval_job.c
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
@@ -8,6 +8,7 @@
 #include "xe_dep_scheduler.h"
 #include "xe_exec_queue.h"
 #include "xe_gt_types.h"
+#include "xe_page_reclaim.h"
 #include "xe_tlb_inval.h"
 #include "xe_tlb_inval_job.h"
 #include "xe_migrate.h"
@@ -39,6 +40,8 @@ struct xe_tlb_inval_job {
 	int type;
 	/** @fence_armed: Fence has been armed */
 	bool fence_armed;
+	/** @prl: Embedded copy of page reclaim list */
+	struct xe_page_reclaim_list prl;
 };
 
 static struct dma_fence *xe_tlb_inval_job_run(struct xe_dep_job *dep_job)
@@ -107,6 +110,7 @@ xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
 	job->start = start;
 	job->end = end;
 	job->fence_armed = false;
+	xe_page_reclaim_list_invalidate(&job->prl);
 	job->dep.ops = &dep_job_ops;
 	job->type = type;
 	kref_init(&job->refcount);
@@ -140,6 +144,25 @@ xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
 	return ERR_PTR(err);
 }
 
+/**
+ * xe_tlb_inval_job_add_page_reclaim() - Embed PRL into a TLB job
+ * @job: TLB invalidation job that may trigger reclamation
+ * @prl: Page reclaim list populated during unbind
+ *
+ * Copies @prl into the job and takes an extra reference to the entry page so
+ * ownership can transfer to the TLB fence when the job is pushed.
+ */
+void xe_tlb_inval_job_add_page_reclaim(struct xe_tlb_inval_job *job,
+				       struct xe_page_reclaim_list *prl)
+{
+	struct xe_device *xe = gt_to_xe(job->q->gt);
+
+	WARN_ON(!xe->info.has_page_reclaim_hw_assist);
+	job->prl = *prl;
+	/* Pair with put after bo creation */
+	xe_page_reclaim_entries_get(job->prl.entries);
+}
+
 static void xe_tlb_inval_job_destroy(struct kref *ref)
 {
 	struct xe_tlb_inval_job *job = container_of(ref, typeof(*job),
@@ -150,6 +173,10 @@ static void xe_tlb_inval_job_destroy(struct kref *ref)
 	struct xe_device *xe = gt_to_xe(q->gt);
 	struct xe_vm *vm = job->vm;
 
+	/* BO creation retains a copy (if used), so no longer needed */
+	if (job->prl.entries)
+		xe_page_reclaim_entries_put(job->prl.entries);
+
 	if (!job->fence_armed)
 		kfree(ifence);
 	else
@@ -234,6 +261,8 @@ struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
 	/* Creation ref pairs with put in xe_tlb_inval_job_destroy */
 	xe_tlb_inval_fence_init(job->tlb_inval, ifence, false);
 	dma_fence_get(job->fence);	/* Pairs with put in DRM scheduler */
+	if (job->prl.num_entries != XE_PAGE_RECLAIM_INVALID_LIST)
+		xe_tlb_inval_fence_add_page_reclaim(ifence, &job->prl);
 
 	drm_sched_job_arm(&job->dep.drm);
 	/*
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.h b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
index 4d6df1a6c6ca..03d6e21cd611 100644
--- a/drivers/gpu/drm/xe/xe_tlb_inval_job.h
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
@@ -12,6 +12,7 @@ struct dma_fence;
 struct xe_dep_scheduler;
 struct xe_exec_queue;
 struct xe_migrate;
+struct xe_page_reclaim_list;
 struct xe_tlb_inval;
 struct xe_tlb_inval_job;
 struct xe_vm;
@@ -21,6 +22,9 @@ xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
 			struct xe_dep_scheduler *dep_scheduler,
 			struct xe_vm *vm, u64 start, u64 end, int type);
 
+void xe_tlb_inval_job_add_page_reclaim(struct xe_tlb_inval_job *job,
+				       struct xe_page_reclaim_list *prl);
+
 int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job);
 
 struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
-- 
2.51.2


  parent reply	other threads:[~2025-11-18  9:06 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-18  9:05 [PATCH 00/11] Page Reclamation Support for Xe3p Platforms Brian Nguyen
2025-11-18  9:05 ` [PATCH 01/11] [DO, NOT, REVIEW] drm/xe: Do not forward invalid TLB invalidation seqnos to upper layers Brian Nguyen
2025-11-18  9:05 ` [PATCH 02/11] drm/xe: Reset tlb fence timeout on invalid seqno received Brian Nguyen
2025-11-21 17:23   ` Lin, Shuicheng
2025-11-22  1:53     ` Nguyen, Brian3
2025-11-22 18:25   ` Matthew Brost
2025-11-25 11:01     ` Nguyen, Brian3
2025-11-18  9:05 ` [PATCH 03/11] drm/xe/xe_tlb_inval: Modify fence interface to support PPC flush Brian Nguyen
2025-11-21 18:02   ` Lin, Shuicheng
2025-11-22  1:54     ` Nguyen, Brian3
2025-11-22 19:32   ` Matthew Brost
2025-11-25 11:07     ` Nguyen, Brian3
2025-11-18  9:05 ` [PATCH 04/11] drm/xe: Add page reclamation info to device info Brian Nguyen
2025-11-21 18:15   ` Lin, Shuicheng
2025-11-22 18:31   ` Matthew Brost
2025-11-18  9:05 ` [PATCH 05/11] drm/xe/guc: Add page reclamation interface to GuC Brian Nguyen
2025-11-21 18:32   ` Lin, Shuicheng
2025-11-22  1:56     ` Nguyen, Brian3
2025-11-22 18:39       ` Matthew Brost
2025-11-25 11:13         ` Nguyen, Brian3
2025-11-18  9:05 ` [PATCH 06/11] drm/xe: Create page reclaim list on unbind Brian Nguyen
2025-11-21 21:29   ` Lin, Shuicheng
2025-11-22  1:57     ` Nguyen, Brian3
2025-11-22 19:18   ` Matthew Brost
2025-11-25 11:18     ` Nguyen, Brian3
2025-11-25 18:34       ` Matthew Brost
2025-11-25 19:01         ` Nguyen, Brian3
2025-11-25 19:07           ` Matthew Brost
2025-11-25 19:46             ` Nguyen, Brian3
2025-11-25 22:35               ` Matthew Brost
2025-11-26  2:33                 ` Nguyen, Brian3
2025-11-18  9:05 ` [PATCH 07/11] drm/xe: Suballocate BO for page reclaim Brian Nguyen
2025-11-22 19:42   ` Matthew Brost
2025-11-25 11:20     ` Nguyen, Brian3
2025-11-18  9:05 ` Brian Nguyen [this message]
2025-11-22 13:52   ` [PATCH 08/11] drm/xe: Prep page reclaim in tlb inval job Michal Wajdeczko
2025-11-25 11:20     ` Nguyen, Brian3
2025-11-18  9:05 ` [PATCH 09/11] drm/xe: Append page reclamation action to tlb inval Brian Nguyen
2025-11-18  9:05 ` [PATCH 10/11] drm/xe: Optimize flushing of L2$ by skipping unnecessary page reclaim Brian Nguyen
2025-11-24 12:29   ` Matthew Auld
2025-11-25  6:12     ` Nguyen, Brian3
2025-11-25 11:48     ` Upadhyay, Tejas
2025-11-25 13:05       ` Upadhyay, Tejas
2025-11-18  9:05 ` [PATCH 11/11] drm/xe: Add debugfs support for page reclamation Brian Nguyen
2025-11-21 22:32   ` Lin, Shuicheng
2025-11-22  1:57     ` Nguyen, Brian3
2025-11-22 14:18   ` Michal Wajdeczko
2025-11-25 11:21     ` Nguyen, Brian3
2025-11-18  9:52 ` ✗ CI.checkpatch: warning for Page Reclamation Support for Xe3p Platforms Patchwork
2025-11-18  9:53 ` ✓ CI.KUnit: success " Patchwork
2025-11-18 13:02 ` ✗ Xe.CI.Full: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251118090552.246243-9-brian3.nguyen@intel.com \
    --to=brian3.nguyen@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.brost@intel.com \
    --cc=shuicheng.lin@intel.com \
    --cc=stuart.summers@intel.com \
    --cc=tejas.upadhyay@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox