From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Matthew Brost" <matthew.brost@intel.com>,
"Christian König" <christian.koenig@amd.com>,
dri-devel@lists.freedesktop.org, "Jason Gunthorpe" <jgg@ziepe.ca>,
"Andrew Morton" <akpm@linux-foundation.org>,
"Simona Vetter" <simona.vetter@ffwll.ch>,
"Dave Airlie" <airlied@gmail.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 6/6] drm/xe: Implement two pass MMU notifiers for SVM
Date: Sat, 9 Aug 2025 15:51:37 +0200
Message-ID: <20250809135137.259427-7-thomas.hellstrom@linux.intel.com>
In-Reply-To: <20250809135137.259427-1-thomas.hellstrom@linux.intel.com>
From: Matthew Brost <matthew.brost@intel.com>
Implement two-pass MMU notifiers for SVM, enabling multiple VMs or
devices with GPU mappings to pipeline costly TLB invalidations by
issuing them in the first pass and waiting for completion in the second.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/drm_gpusvm.c | 2 +-
drivers/gpu/drm/xe/xe_svm.c | 74 ++++++++++++++++++++++++++++++------
2 files changed, 63 insertions(+), 13 deletions(-)
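
A simplified sketch of the two-pass flow this patch wires up, for reviewers
skimming the diff below: second_pass()/first_pass() are stand-in names for
xe_svm_invalidate_second()/xe_svm_invalidate_twopass(), and the first-pass
signature is trimmed; the pass struct, fence helpers and fence count are the
ones added by the patch. Locking, clamping to the notifier boundaries and the
allocation-failure fallback are left out here.

/* Pass 2: by the time this runs, every notifier has already issued its
 * invalidations in pass 1, so the waits below overlap with the other
 * devices'/VMs' invalidations instead of serializing behind them. */
static struct mmu_interval_notifier_pass *
second_pass(struct mmu_interval_notifier_pass *p,
	    const struct mmu_notifier_range *mmu_range,
	    unsigned long cur_seq)
{
	struct xe_svm_invalidate_pass *pass = container_of(p, typeof(*pass), p);
	int id;

	for (id = 0; id < XE_SVM_INVALIDATE_FENCE_COUNT; ++id)
		xe_gt_tlb_invalidation_fence_wait(&pass->fences[id]);

	/* per-range xe_svm_range_notifier_event_end() processing goes here */

	kfree(pass);
	return NULL;	/* no further passes needed */
}

/* Pass 1: issue the TLB invalidations against per-GT fences, then hand a
 * continuation back to the notifier core instead of waiting here. */
static void first_pass(struct xe_vm *vm, u64 start, u64 end, u8 tile_mask,
		       struct mmu_interval_notifier_pass **p)
{
	struct xe_svm_invalidate_pass *pass = kzalloc(sizeof(*pass), GFP_NOWAIT);

	if (!pass)
		return;	/* the patch falls back to the single-pass path here */

	xe_vm_range_tilemask_tlb_invalidation(vm, pass->fences, start, end,
					      tile_mask);

	pass->p.pass = second_pass;
	*p = &pass->p;	/* core calls second_pass() once all first passes ran */
}
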
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 92dc7d2bd6cf..f153df1bc862 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -413,7 +413,7 @@ drm_gpusvm_notifier_invalidate_twopass(struct mmu_interval_notifier *mni,
* drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM
*/
static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
- .invalidate_twopass = drm_gpusvm_notifier_invalidate_twopass,
+ .invalidate_multipass = drm_gpusvm_notifier_invalidate_twopass,
};
/**
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 82a598c8d56e..5728394806ca 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -144,15 +144,8 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
* invalidations spanning multiple ranges.
*/
for_each_tile(tile, xe, id)
- if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ if (xe_pt_zap_ptes_range(tile, vm, range))
tile_mask |= BIT(id);
- /*
- * WRITE_ONCE pairs with READ_ONCE in
- * xe_vm_has_valid_gpu_mapping()
- */
- WRITE_ONCE(range->tile_invalidated,
- range->tile_invalidated | BIT(id));
- }
return tile_mask;
}
@@ -161,16 +154,60 @@ static void
xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
const struct mmu_notifier_range *mmu_range)
{
+ struct xe_svm_range *range = to_xe_range(r);
struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
xe_svm_assert_in_notifier(vm);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(range->tile_invalidated, range->tile_present);
+
drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
mmu_range);
}
+struct xe_svm_invalidate_pass {
+ struct drm_gpusvm *gpusvm;
+ struct drm_gpusvm_notifier *notifier;
+#define XE_SVM_INVALIDATE_FENCE_COUNT \
+ (XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE)
+ struct xe_gt_tlb_invalidation_fence fences[XE_SVM_INVALIDATE_FENCE_COUNT];
+ struct mmu_interval_notifier_pass p;
+};
+
+static struct mmu_interval_notifier_pass *
+xe_svm_invalidate_second(struct mmu_interval_notifier_pass *p,
+ const struct mmu_notifier_range *mmu_range,
+ unsigned long cur_seq)
+{
+ struct xe_svm_invalidate_pass *pass = container_of(p, typeof(*pass), p);
+ struct drm_gpusvm *gpusvm = pass->gpusvm;
+ struct drm_gpusvm_notifier *notifier = pass->notifier;
+ struct drm_gpusvm_range *r = NULL;
+ struct xe_vm *vm = gpusvm_to_vm(gpusvm);
+ u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
+ int id;
+
+ /* Adjust invalidation to notifier boundaries */
+ adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
+ adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
+
+ for (id = 0; id < XE_SVM_INVALIDATE_FENCE_COUNT; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&pass->fences[id]);
+
+ drm_gpusvm_in_notifier_lock(gpusvm);
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ xe_svm_range_notifier_event_end(vm, r, mmu_range);
+ drm_gpusvm_in_notifier_unlock(gpusvm);
+
+ kfree(pass);
+ return NULL;
+}
+
static void xe_svm_invalidate_twopass(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
const struct mmu_notifier_range *mmu_range,
@@ -179,6 +216,8 @@ static void xe_svm_invalidate_twopass(struct drm_gpusvm *gpusvm,
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
struct drm_gpusvm_range *r, *first;
+ struct xe_svm_invalidate_pass *pass = NULL;
+ struct xe_gt_tlb_invalidation_fence *fences = NULL;
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
u8 tile_mask = 0;
long err;
@@ -226,14 +265,25 @@ static void xe_svm_invalidate_twopass(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- err = xe_vm_range_tilemask_tlb_invalidation(vm, NULL, adj_start,
+ pass = kzalloc(sizeof(*pass), GFP_NOWAIT);
+ if (pass) {
+ pass->gpusvm = gpusvm;
+ pass->notifier = notifier;
+ pass->p.pass = xe_svm_invalidate_second;
+ fences = pass->fences;
+ *p = &pass->p;
+ }
+
+ err = xe_vm_range_tilemask_tlb_invalidation(vm, fences, adj_start,
adj_end, tile_mask);
WARN_ON_ONCE(err);
range_notifier_event_end:
- r = first;
- drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
- xe_svm_range_notifier_event_end(vm, r, mmu_range);
+ if (!pass) {
+ r = first;
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ xe_svm_range_notifier_event_end(vm, r, mmu_range);
+ }
}
static int __xe_svm_garbage_collector(struct xe_vm *vm,
--
2.50.1
Thread overview: 22+ messages
2025-08-09 13:51 [RFC PATCH 0/6] Multi-pass MMU interval notifiers Thomas Hellström
2025-08-09 13:51 ` [RFC PATCH 1/6] mm/mmu_notifier: Allow multiple struct mmu_interval_notifier passes Thomas Hellström
2025-08-18 16:07 ` Jason Gunthorpe
2025-08-18 16:25 ` Matthew Brost
2025-08-18 16:36 ` Jason Gunthorpe
2025-08-18 16:42 ` Thomas Hellström
2025-08-18 16:45 ` Matthew Brost
2025-08-18 16:44 ` Matthew Brost
2025-08-18 16:46 ` Jason Gunthorpe
2025-08-19 9:55 ` Alistair Popple
2025-08-19 11:33 ` Thomas Hellström
2025-08-19 15:35 ` Matthew Brost
2025-08-21 9:34 ` Thomas Hellström
2025-08-19 10:03 ` Alistair Popple
2025-08-19 11:35 ` Thomas Hellström
2025-08-09 13:51 ` [RFC PATCH 2/6] drm/gpusvm: Update GPU SVM / Xe to twopass MMU notifier Thomas Hellström
2025-08-09 13:51 ` [RFC PATCH 3/6] drm/gpusvm: Add drm_gpusvm_in_notifier_* helpers Thomas Hellström
2025-08-09 13:51 ` [RFC PATCH 4/6] drm/xe: Skip waiting on unarmed fences in xe_gt_tlb_invalidation_fence_wait Thomas Hellström
2025-08-09 13:51 ` [RFC PATCH 5/6] drm/xe: Add fences argument to xe_vm_range_tilemask_tlb_invalidation Thomas Hellström
2025-08-09 13:51 ` Thomas Hellström [this message]
2025-08-11 20:46 ` [RFC PATCH 6/6] drm/xe: Implement two pass MMU notifiers for SVM Matthew Brost
2025-08-12 9:06 ` Thomas Hellström