Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Arvind Yadav <arvind.yadav@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: matthew.brost@intel.com, himal.prasad.ghimiray@intel.com,
	thomas.hellstrom@linux.intel.com, pallavi.mishra@intel.com
Subject: [RFC v3 6/8] drm/xe/madvise: Implement per-VMA purgeable state tracking
Date: Wed, 10 Dec 2025 10:00:50 +0530	[thread overview]
Message-ID: <20251210043112.3267620-7-arvind.yadav@intel.com> (raw)
In-Reply-To: <20251210043112.3267620-1-arvind.yadav@intel.com>

Track purgeable state per-VMA instead of using a coarse shared
BO check. This prevents purging shared BOs until all VMAs across
all VMs are marked DONTNEED.

Add xe_bo_all_vmas_dontneed() to check all VMAs before marking
a BO purgeable. Add xe_bo_recheck_purgeable_on_vma_unbind() to
handle state transitions when VMAs are destroyed: if all
remaining VMAs are DONTNEED, the BO can become purgeable; if
no VMAs remain, it transitions back to WILLNEED.

The per-VMA purgeable_state field stores the madvise hint for
each mapping. Shared BOs can only be purged when all VMAs
unanimously indicate DONTNEED.

v3:
  - This addresses Thomas Hellström's feedback: "loop over all vmas
    attached to the bo and check that they all say WONTNEED. This will
    also need a check at VMA unbinding"

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Arvind Yadav <arvind.yadav@intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c         | 15 ++++-
 drivers/gpu/drm/xe/xe_vm_madvise.c | 91 +++++++++++++++++++++++++++++-
 drivers/gpu/drm/xe/xe_vm_madvise.h |  3 +
 drivers/gpu/drm/xe/xe_vm_types.h   | 11 ++++
 4 files changed, 118 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 9a6c9a26c6da..604306684082 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -40,6 +40,7 @@
 #include "xe_tile.h"
 #include "xe_tlb_inval.h"
 #include "xe_trace_bo.h"
+#include "xe_vm_madvise.h"
 #include "xe_wa.h"
 
 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
@@ -1057,12 +1058,18 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 static void xe_vma_destroy_late(struct xe_vma *vma)
 {
 	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_bo *bo = NULL;
 
 	if (vma->ufence) {
 		xe_sync_ufence_put(vma->ufence);
 		vma->ufence = NULL;
 	}
 
+	/* Get BO reference for purgeable state re-check */
+	if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma) &&
+	    !xe_vma_is_cpu_addr_mirror(vma))
+		bo = xe_vma_bo(vma);
+
 	if (xe_vma_is_userptr(vma)) {
 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 
@@ -1071,7 +1078,13 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 	} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
 		xe_vm_put(vm);
 	} else {
-		xe_bo_put(xe_vma_bo(vma));
+		/* Trylock safe for async context; madvise corrects failures */
+		if (bo && dma_resv_trylock(bo->ttm.base.resv)) {
+			xe_bo_recheck_purgeable_on_vma_unbind(bo);
+			dma_resv_unlock(bo->ttm.base.resv);
+		}
+
+		xe_bo_put(bo);
 	}
 
 	xe_vma_free(vma);
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index 3e6f77dc1dcb..9f2d6c1d3062 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -12,6 +12,7 @@
 #include "xe_pat.h"
 #include "xe_pt.h"
 #include "xe_svm.h"
+#include "xe_vm.h"
 
 struct xe_vmas_in_madvise_range {
 	u64 addr;
@@ -158,6 +159,84 @@ static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
 	}
 }
 
+/**
+ * xe_bo_all_vmas_dontneed() - Check if all VMAs of a BO are marked DONTNEED
+ * @bo: Buffer object
+ *
+ * Check all VMAs across all VMs to determine if BO can be purged.
+ * Shared BOs require unanimous DONTNEED state from all mappings.
+ *
+ * Caller must hold BO dma-resv lock.
+ *
+ * Return: true if all VMAs are DONTNEED, false otherwise
+ */
+static bool xe_bo_all_vmas_dontneed(struct xe_bo *bo)
+{
+	struct drm_gpuvm_bo *vm_bo;
+	struct drm_gpuva *gpuva;
+	struct drm_gem_object *obj = &bo->ttm.base;
+	bool has_vmas = false;
+
+	dma_resv_assert_held(bo->ttm.base.resv);
+
+	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
+			struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+			has_vmas = true;
+
+			/* Any non-DONTNEED VMA prevents purging */
+			if (READ_ONCE(vma->purgeable_state) != XE_MADV_PURGEABLE_DONTNEED)
+				return false;
+		}
+	}
+
+	/* No VMAs means not purgeable */
+	if (!has_vmas)
+		return false;
+
+	return true;
+}
+
+/**
+ * xe_bo_recheck_purgeable_on_vma_unbind() - Re-evaluate BO purgeable state after VMA unbind
+ * @bo: Buffer object
+ *
+ * When a VMA is unbound, re-check if the BO's purgeable state should change.
+ * Destroyed VMAs may allow the BO to become purgeable if all remaining VMAs
+ * are DONTNEED, or require transition to WILLNEED if no VMAs remain.
+ *
+ * Called from VMA destruction path with BO dma-resv lock held.
+ */
+void xe_bo_recheck_purgeable_on_vma_unbind(struct xe_bo *bo)
+{
+	int current_state;
+
+	if (!bo)
+		return;
+
+	dma_resv_assert_held(bo->ttm.base.resv);
+
+	current_state = atomic_read(&bo->madv_purgeable);
+
+	/*
+	 * Once purged, always purged. Cannot transition back to WILLNEED.
+	 * This matches i915 semantics where purged BOs are permanently invalid.
+	 */
+	if (current_state == XE_MADV_PURGEABLE_PURGED)
+		return;
+
+	if (xe_bo_all_vmas_dontneed(bo)) {
+		/* All VMAs are DONTNEED - mark BO purgeable */
+		if (current_state != XE_MADV_PURGEABLE_DONTNEED)
+			atomic_set(&bo->madv_purgeable, XE_MADV_PURGEABLE_DONTNEED);
+	} else {
+		/* At least one VMA is WILLNEED - BO must not be purgeable */
+		if (current_state != XE_MADV_PURGEABLE_WILLNEED)
+			atomic_set(&bo->madv_purgeable, XE_MADV_PURGEABLE_WILLNEED);
+	}
+}
+
 /*
  * Handle purgeable buffer object advice for DONTNEED/WILLNEED/PURGED.
  * Returns true if any BO was purged, false otherwise.
@@ -181,6 +260,10 @@ static bool xe_vm_madvise_purgeable_bo(struct xe_device *xe, struct xe_vm *vm,
 		/* BO must be locked before modifying madv state */
 		xe_bo_assert_held(bo);
 
+		/* Skip external dma-bufs */
+		if (xe_bo_is_external_dmabuf(bo))
+			continue;
+
 		/*
 		 * Once purged, always purged. Cannot transition back to WILLNEED.
 		 * This matches i915 semantics where purged BOs are permanently invalid.
@@ -192,10 +275,16 @@ static bool xe_vm_madvise_purgeable_bo(struct xe_device *xe, struct xe_vm *vm,
 
 		switch (op->purge_state_val.val) {
 		case DRM_XE_VMA_PURGEABLE_STATE_WILLNEED:
+			vmas[i]->purgeable_state = XE_MADV_PURGEABLE_WILLNEED;
+			 /* Mark VMA WILLNEED - BO becomes non-purgeable immediately */
 			atomic_set(&bo->madv_purgeable, XE_MADV_PURGEABLE_WILLNEED);
 			break;
 		case DRM_XE_VMA_PURGEABLE_STATE_DONTNEED:
-			atomic_set(&bo->madv_purgeable, XE_MADV_PURGEABLE_DONTNEED);
+			vmas[i]->purgeable_state = XE_MADV_PURGEABLE_DONTNEED;
+
+			/* Mark BO purgeable only if all VMAs are DONTNEED */
+			if (xe_bo_all_vmas_dontneed(bo))
+				atomic_set(&bo->madv_purgeable, XE_MADV_PURGEABLE_DONTNEED);
 			break;
 		default:
 			drm_warn(&vm->xe->drm, "Invalid madvice value = %d\n",
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
index b0e1fc445f23..61868f851949 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.h
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
@@ -8,8 +8,11 @@
 
 struct drm_device;
 struct drm_file;
+struct xe_bo;
 
 int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 
+void xe_bo_recheck_purgeable_on_vma_unbind(struct xe_bo *bo);
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 3bf912bfbdcc..ad16fb147da6 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -140,6 +140,17 @@ struct xe_vma {
 	 */
 	bool skip_invalidation;
 
+	/**
+	 * @purgeable_state: Purgeable hint for this VMA mapping
+	 *
+	 * Per-VMA purgeable state from madvise. Valid states are WILLNEED (0)
+	 * or DONTNEED (1). Shared BOs require all VMAs to be DONTNEED before
+	 * the BO can be purged. PURGED state exists only at BO level.
+	 *
+	 * Protected by BO dma-resv lock. Set via DRM_IOCTL_XE_MADVISE.
+	 */
+	u32 purgeable_state;
+
 	/**
 	 * @ufence: The user fence that was provided with MAP.
 	 * Needs to be signalled before UNMAP can be processed.
-- 
2.43.0


  parent reply	other threads:[~2025-12-10  4:32 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-12-10  4:30 [RFC v3 0/8] drm/xe/madvise: Add support for purgeable buffer objects Arvind Yadav
2025-12-10  4:30 ` [RFC v3 1/8] drm/xe/uapi: Add UAPI " Arvind Yadav
2025-12-10  5:33   ` Matthew Brost
2025-12-10  7:16     ` Yadav, Arvind
2026-01-22 13:32       ` Thomas Hellström
2025-12-10  4:30 ` [RFC v3 2/8] drm/xe/bo: Add purgeable bo state tracking and field madv to xe_bo Arvind Yadav
2025-12-10  5:46   ` Matthew Brost
2025-12-10  7:18     ` Yadav, Arvind
2025-12-10  4:30 ` [RFC v3 3/8] drm/xe/madvise: Implement purgeable buffer object support Arvind Yadav
2025-12-10  4:30 ` [RFC v3 4/8] drm/xe/bo: Handle CPU faults on purged buffer objects Arvind Yadav
2025-12-10  4:30 ` [RFC v3 5/8] drm/xe/vm: Prevent binding of " Arvind Yadav
2025-12-10  4:30 ` Arvind Yadav [this message]
2025-12-10  4:30 ` [RFC v3 7/8] drm/xe/madvise: Block imported and exported dma-bufs Arvind Yadav
2025-12-10  4:30 ` [RFC v3 8/8] drm/xe/bo: Add purgeable shrinker state helpers Arvind Yadav
2025-12-11  7:22 ` ✗ CI.checkpatch: warning for drm/xe/madvise: Add support for purgeable buffer objects (rev4) Patchwork
2025-12-11  7:24 ` ✓ CI.KUnit: success " Patchwork
2025-12-11  7:57 ` ✓ Xe.CI.BAT: " Patchwork
2025-12-11 14:56 ` ✗ Xe.CI.Full: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251210043112.3267620-7-arvind.yadav@intel.com \
    --to=arvind.yadav@intel.com \
    --cc=himal.prasad.ghimiray@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.brost@intel.com \
    --cc=pallavi.mishra@intel.com \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox