From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
dri-devel@lists.freedesktop.org, himal.prasad.ghimiray@intel.com,
apopple@nvidia.com, airlied@gmail.com,
"Simona Vetter" <simona.vetter@ffwll.ch>,
felix.kuehling@amd.com, "Matthew Brost" <matthew.brost@intel.com>,
"Christian König" <christian.koenig@amd.com>,
dakr@kernel.org, "Mrozek, Michal" <michal.mrozek@intel.com>,
"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>
Subject: [PATCH v2 10/17] drm/xe: Pass a drm_pagemap pointer around with the memory advise attributes
Date: Tue, 11 Nov 2025 17:44:00 +0100
Message-ID: <20251111164408.113070-11-thomas.hellstrom@linux.intel.com>
In-Reply-To: <20251111164408.113070-1-thomas.hellstrom@linux.intel.com>
Pass a refcounted drm_pagemap pointer around with the memory advise
attributes. As a consequence, struct xe_vma_mem_attr can no longer simply
be assigned or freed without taking the reference counts of individual
members into account. Also add helpers to do that.
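For illustration, a minimal sketch of the resulting calling convention,
written as if inside xe_vm.c where both helpers are visible (the
example_* functions are hypothetical, not part of the patch):

	/*
	 * Illustration only: with a refcounted drm_pagemap pointer inside
	 * the attributes, duplicating and tearing down attributes must go
	 * through the helpers rather than plain assignment and kfree().
	 */
	static void example_dup_vma_attr(struct xe_vma *dst, struct xe_vma *src)
	{
		/*
		 * Releases any reference dst->attr already holds, copies the
		 * struct, then takes a new reference for the copy. dst->attr
		 * must be either valid or zero-initialized on entry.
		 */
		xe_vma_mem_attr_copy(&dst->attr, &src->attr);
	}

	static void example_drop_vma_attr(struct xe_vma *vma)
	{
		/* Puts the drm_pagemap reference held by vma->attr. */
		xe_vma_mem_attr_fini(&vma->attr);
	}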
v2:
- Move some calls to xe_vma_mem_attr_fini() to xe_vma_free(). (Matt Brost)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 2 +-
drivers/gpu/drm/xe/xe_vm.c | 34 +++++++++++++++++++++++++-----
drivers/gpu/drm/xe/xe_vm.h | 1 +
drivers/gpu/drm/xe/xe_vm_madvise.c | 1 +
drivers/gpu/drm/xe/xe_vm_types.h | 9 ++++++++
5 files changed, 41 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 7db9eafec66b..4a3853a5cd64 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -329,7 +329,7 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
default_attr.pat_index = vma->attr.default_pat_index;
default_attr.default_pat_index = vma->attr.default_pat_index;
- vma->attr = default_attr;
+ xe_vma_mem_attr_copy(&vma->attr, &default_attr);
} else {
vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
range_start, range_end);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2321e7c8ae76..27669f80b7ff 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -957,14 +957,37 @@ struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
return fence;
}
+static void xe_vma_mem_attr_fini(struct xe_vma_mem_attr *attr)
+{
+ drm_pagemap_put(attr->preferred_loc.dpagemap);
+}
+
static void xe_vma_free(struct xe_vma *vma)
{
+ xe_vma_mem_attr_fini(&vma->attr);
+
if (xe_vma_is_userptr(vma))
kfree(to_userptr_vma(vma));
else
kfree(vma);
}
+/**
+ * xe_vma_mem_attr_copy() - copy an xe_vma_mem_attr structure.
+ * @to: Destination.
+ * @from: Source.
+ *
+ * Copies an xe_vma_mem_attr structure taking care to get reference
+ * counting of individual members right.
+ */
+void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from)
+{
+ xe_vma_mem_attr_fini(to);
+ *to = *from;
+ if (to->preferred_loc.dpagemap)
+ drm_pagemap_get(to->preferred_loc.dpagemap);
+}
+
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
@@ -1015,8 +1038,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (vm->xe->info.has_atomic_enable_pte_bit)
vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
- vma->attr = *attr;
-
+ xe_vma_mem_attr_copy(&vma->attr, attr);
if (bo) {
struct drm_gpuvm_bo *vm_bo;
@@ -4240,7 +4262,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
struct drm_gpuva_op *__op;
unsigned int vma_flags = 0;
bool remap_op = false;
- struct xe_vma_mem_attr tmp_attr;
+ struct xe_vma_mem_attr tmp_attr = {};
u16 default_pat;
int err;
@@ -4333,7 +4355,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
* VMA, so they can be assigned to newly MAP created vma.
*/
if (is_madvise)
- tmp_attr = vma->attr;
+ xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);
xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
} else if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4343,12 +4365,13 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
* copy them to new vma.
*/
if (is_madvise)
- vma->attr = tmp_attr;
+ xe_vma_mem_attr_copy(&vma->attr, &tmp_attr);
}
}
xe_vm_unlock(vm);
drm_gpuva_ops_free(&vm->gpuvm, ops);
+ xe_vma_mem_attr_fini(&tmp_attr);
return 0;
unwind_ops:
@@ -4406,3 +4429,4 @@ int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t r
return xe_vm_alloc_vma(vm, &map_req, false);
}
+
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index ef8a5019574e..d328d31afe8e 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -411,4 +411,5 @@ static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
+void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from);
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index cad3cf627c3f..9553008409d1 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -95,6 +95,7 @@ static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
*/
vmas[i]->attr.preferred_loc.migration_policy =
op->preferred_mem_loc.migration_policy;
+ vmas[i]->attr.preferred_loc.dpagemap = NULL;
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 0d09a322199d..ca489aa7c652 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -20,6 +20,8 @@
#include "xe_range_fence.h"
#include "xe_userptr.h"
+struct drm_pagemap;
+
struct xe_bo;
struct xe_svm_range;
struct xe_sync_entry;
@@ -65,6 +67,13 @@ struct xe_vma_mem_attr {
* closest device memory respectively.
*/
u32 devmem_fd;
+ /**
+ * @preferred_loc.dpagemap: Reference-counted pointer to the drm_pagemap preferred
+ * for migration on a SVM page-fault. The pointer is protected by the
+ * vm lock, and is %NULL if @devmem_fd should be consulted for special
+ * values.
+ */
+ struct drm_pagemap *dpagemap;
} preferred_loc;
/**
--
2.51.1