* [PATCH v7 01/24] drm/gpuvm: Pass map arguments through a struct
From: Himal Prasad Ghimiray @ 2025-08-14 10:59 UTC
To: intel-xe
Cc: Matthew Brost, Thomas Hellström, Boris Brezillon,
Danilo Krummrich, Brendan King, Boris Brezillon, Caterina Shablia,
Rob Clark, dri-devel, Himal Prasad Ghimiray
From: Boris Brezillon <boris.brezillon@collabora.com>
We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
so, before we do that, let's pass arguments through a struct instead
of changing each call site every time a new optional argument is added.
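For illustration, a call site converts roughly as follows (a minimal
sketch, distilled from the diff below):

  /* before */
  ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
                                    obj, offset);

  /* after */
  struct drm_gpuvm_map_req map_req = {
          .map.va.addr = addr,
          .map.va.range = range,
          .map.gem.obj = obj,
          .map.gem.offset = offset,
  };

  ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);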
v5
- Use drm_gpuva_op_map, same as drm_gpuvm_map_req
- Rebase changes for drm_gpuvm_sm_map_exec_lock()
- Fix kernel-docs
v6
- Use drm_gpuvm_map_req (Danilo/Matt)
v7
- change member name to map instead of op_map
- use local variable to minimize the code changes in _sm_map
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Brendan King <Brendan.King@imgtec.com>
Cc: Boris Brezillon <bbrezillon@kernel.org>
Cc: Caterina Shablia <caterina.shablia@collabora.com>
Cc: Rob Clark <robin.clark@oss.qualcomm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Co-developed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Danilo Krummrich <dakr@kernel.org> #v4
Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
---
drivers/gpu/drm/drm_gpuvm.c | 74 +++++++++++---------------
drivers/gpu/drm/imagination/pvr_vm.c | 15 ++++--
drivers/gpu/drm/msm/msm_gem_vma.c | 25 +++++++--
drivers/gpu/drm/nouveau/nouveau_uvmm.c | 11 ++--
drivers/gpu/drm/panthor/panthor_mmu.c | 13 +++--
drivers/gpu/drm/xe/xe_vm.c | 13 +++--
include/drm/drm_gpuvm.h | 20 ++++---
7 files changed, 102 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index bbc7fecb6f4a..6c18cec70f11 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -486,13 +486,18 @@
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
+ * struct drm_gpuvm_map_req map_req = {
+ * .map.va.addr = addr,
+ * .map.va.range = range,
+ * .map.gem.obj = obj,
+ * .map.gem.offset = offset,
+ * };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- * obj, offset);
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_op op = {};
op.op = DRM_GPUVA_OP_MAP;
- op.map.va.addr = addr;
- op.map.va.range = range;
- op.map.gem.obj = obj;
- op.map.gem.offset = offset;
+ op.map.va.addr = req->map.va.addr;
+ op.map.va.range = req->map.va.range;
+ op.map.gem.obj = req->map.gem.obj;
+ op.map.gem.offset = req->map.gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2102,10 +2106,14 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
struct drm_gpuva *va, *next;
+
+ u64 req_offset = req->map.gem.offset;
+ u64 req_range = req->map.va.range;
+ u64 req_addr = req->map.va.addr;
u64 req_end = req_addr + req_range;
int ret;
@@ -2236,9 +2244,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
}
}
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return op_map_cb(ops, priv, req);
}
static int
@@ -2303,10 +2309,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to struct drm_gpuvm_map_req
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2336,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2343,9 +2345,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2421,10 +2421,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @exec: the &drm_exec locking context
* @num_fences: for newly mapped objects, the # of fences to reserve
- * @req_addr: the start address of the range to unmap
- * @req_range: the range of the mappings to unmap
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to struct drm_gpuvm_map_req
*
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2445,9 +2442,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
* break;
* case DRIVER_OP_MAP:
- * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
- * op->addr, op->range,
- * obj, op->obj_offset);
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
* break;
* }
*
@@ -2478,18 +2473,17 @@ static const struct drm_gpuvm_ops lock_ops = {
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ struct drm_gpuvm_map_req *req)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+
if (req_obj) {
int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
if (ret)
return ret;
}
- return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2611,10 +2605,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: map request arguments
*
* This function creates a list of operations to perform splitting and merging
* of existent mapping(s) with the newly requested one.
@@ -2642,8 +2633,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_ops *ops;
struct {
@@ -2661,9 +2651,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
args.vm = gpuvm;
args.ops = ops;
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
- req_obj, req_offset);
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
if (ret)
goto err_free_ops;
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 2896fa7501b1..3d97990170bf 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
switch (bind_op->type) {
- case PVR_VM_BIND_TYPE_MAP:
+ case PVR_VM_BIND_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = bind_op->device_addr,
+ .map.va.range = bind_op->size,
+ .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
+ .map.gem.offset = bind_op->offset,
+ };
+
return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
- bind_op, bind_op->device_addr,
- bind_op->size,
- gem_from_pvr_gem(bind_op->pvr_obj),
- bind_op->offset);
+ bind_op, &map_req);
+ }
case PVR_VM_BIND_TYPE_UNMAP:
return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3cd8562a5109..e106428369ef 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -1172,10 +1172,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
break;
case MSM_VM_BIND_OP_MAP:
case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
- op->iova, op->range,
- op->obj, op->obj_offset);
+ {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
@@ -1283,9 +1290,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
arg.flags |= MSM_VMA_DUMP;
fallthrough;
case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
- op->range, op->obj, op->obj_offset);
+ {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index ddfc46bc1b3e..d94a85509176 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->gem.obj,
+ .map.gem.offset = op->gem.offset,
+ };
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
@@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
- op->va.addr,
- op->va.range,
- op->gem.obj,
- op->gem.offset);
+ &map_req);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 4140f697ba5a..e3cdaa73fd38 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -2169,15 +2169,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
mutex_lock(&vm->op_lock);
vm->op_ctx = op;
switch (op_type) {
- case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->map.vm_bo->obj,
+ .map.gem.offset = op->map.bo_offset,
+ };
+
if (vm->unusable) {
ret = -EINVAL;
break;
}
- ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
- op->map.vm_bo->obj, op->map.bo_offset);
+ ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
+ }
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d40d2d43c041..15359ee738e6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2337,10 +2337,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
switch (operation) {
case DRM_XE_VM_BIND_OP_MAP:
- case DRM_XE_VM_BIND_OP_MAP_USERPTR:
- ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
- obj, bo_offset_or_userptr);
+ case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = range,
+ .map.gem.obj = obj,
+ .map.gem.offset = bo_offset_or_userptr,
+ };
+
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
break;
+ }
case DRM_XE_VM_BIND_OP_UNMAP:
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
break;
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 274532facfd6..a9fa44148e0c 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1058,10 +1058,20 @@ struct drm_gpuva_ops {
*/
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+/**
+ * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
+ */
+struct drm_gpuvm_map_req {
+ /**
+ * @map: struct drm_gpuva_op_map
+ */
+ struct drm_gpuva_op_map map;
+};
+
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuvm_map_req *req);
+
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
@@ -1205,16 +1215,14 @@ struct drm_gpuvm_ops {
};
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuvm_map_req *req);
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *obj, u64 offset);
+ struct drm_gpuvm_map_req *req);
int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
u64 req_addr, u64 req_range);
--
2.34.1
* [PATCH v7 03/24] drm/gpuvm: Introduce drm_gpuvm_madvise_ops_create
From: Himal Prasad Ghimiray @ 2025-08-14 10:59 UTC
To: intel-xe
Cc: Matthew Brost, Thomas Hellström, Himal Prasad Ghimiray,
Danilo Krummrich, Boris Brezillon, dri-devel
These ops are used to iterate over GPUVAs in the user-provided range
and split existing sparse VMAs if the start or end of the input
range lies within one. The operations can create up to 2 REMAPs and 2 MAPs.
The primary use case is for drivers to assign attributes to GPU VAs in
the specified range without performing unmaps or merging mappings,
supporting fine-grained control over sparse VAs.
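For illustration, a driver could consume the returned ops roughly like
this (a minimal sketch; driver_lock_va_space(), driver_unlock_va_space()
and driver_apply_attrs() are hypothetical placeholders):

  struct drm_gpuvm_map_req req = {
          .map.va.addr = start,
          .map.va.range = end - start,
  };
  struct drm_gpuva_ops *ops;
  struct drm_gpuva_op *op;

  driver_lock_va_space();

  ops = drm_gpuvm_madvise_ops_create(gpuvm, &req);
  if (IS_ERR(ops)) {
          driver_unlock_va_space();
          return PTR_ERR(ops);
  }

  /* Only MAP and REMAP ops are generated; no UNMAPs, no merging. */
  drm_gpuva_for_each_op(op, ops)
          driver_apply_attrs(gpuvm, op, attrs);

  drm_gpuva_ops_free(gpuvm, ops);
  driver_unlock_va_space();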
v2
- use drm_gpuvm_sm_map_ops_create with flags instead of defining new
ops_create (Danilo)
- Add doc (Danilo)
v3
- Fix doc
- Fix unmapping check
v4
- Fix mapping for non madvise ops
v5
- Fix mapping (Matthew Brost)
- Rebase on top of struct changes
v6
- flag moved to map_req
v7 (Danilo)
- Use different functions
- Add kernel-doc
- Modify op_unmap_cb and op_map_cb to handle madvise and NULL ptr
- use gem_obj check in single place
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Boris Brezillon <bbrezillon@kernel.org>
Cc: <dri-devel@lists.freedesktop.org>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
---
drivers/gpu/drm/drm_gpuvm.c | 225 ++++++++++++++++++++++++++++++------
include/drm/drm_gpuvm.h | 3 +
2 files changed, 191 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 6c18cec70f11..d6bea8a4fffd 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -420,6 +420,71 @@
* new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
*/
+/**
+ * DOC: Madvise Logic - Splitting and Traversal
+ *
+ * This logic handles GPU VA range updates by generating remap and map operations
+ * without performing unmaps or merging existing mappings.
+ *
+ * 1) The requested range lies entirely within a single drm_gpuva. The logic splits
+ * the existing mapping at the start and end boundaries and inserts a new map.
+ *
+ * ::
+ *
+ *          a     start        end     b
+ * pre:     |-----------------------|
+ *                 drm_gpuva1
+ *
+ *          a     start        end     b
+ * new:     |-----|=========|-------|
+ *           remap    map     remap
+ *
+ * one REMAP and one MAP: same behaviour as SPLIT and MERGE
+ *
+ * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
+ * across boundaries, remapping the start and end segments, and inserting two
+ * map operations to cover the full range.
+ *
+ * ::
+ *
+ *          a     start        b              c       end         d
+ * pre:     |------------------|--------------|------------------|
+ *              drm_gpuva1        drm_gpuva2       drm_gpuva3
+ *
+ *          a     start        b              c       end         d
+ * new:     |-------|==========|--------------|========|---------|
+ *           remap1    map1      drm_gpuva2      map2    remap2
+ *
+ * two REMAPs and two MAPs
+ *
+ * 3) Either start or end lies within a drm_gpuva. A single remap and a single
+ * map operation are generated to update the affected portion.
+ *
+ * ::
+ *
+ *          a/start              b              c       end         d
+ * pre:     |------------------|--------------|------------------|
+ *              drm_gpuva1        drm_gpuva2       drm_gpuva3
+ *
+ *          a/start              b              c       end         d
+ * new:     |------------------|--------------|========|---------|
+ *              drm_gpuva1        drm_gpuva2     map1    remap1
+ *
+ * ::
+ *
+ *          a     start        b              c/end              d
+ * pre:     |------------------|--------------|------------------|
+ *              drm_gpuva1        drm_gpuva2       drm_gpuva3
+ *
+ *          a     start        b              c/end              d
+ * new:     |-------|==========|--------------|------------------|
+ *           remap1    map1      drm_gpuva2       drm_gpuva3
+ *
+ * one REMAP and one MAP
+ *
+ * 4) Both start and end align with existing drm_gpuva boundaries. No operations
+ * are needed as the range is already covered.
+ *
+ * 5) No existing drm_gpuvas. No operations.
+ *
+ * Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging,
+ * focusing solely on remap and map operations for efficient traversal and update.
+ */
+
/**
* DOC: Locking
*
@@ -2063,6 +2128,9 @@ op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
{
struct drm_gpuva_op op = {};
+ if (!req)
+ return 0;
+
op.op = DRM_GPUVA_OP_MAP;
op.map.va.addr = req->map.va.addr;
op.map.va.range = req->map.va.range;
@@ -2092,10 +2160,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
- struct drm_gpuva *va, bool merge)
+ struct drm_gpuva *va, bool merge, bool madvise)
{
struct drm_gpuva_op op = {};
+ if (madvise)
+ return 0;
+
op.op = DRM_GPUVA_OP_UNMAP;
op.unmap.va = va;
op.unmap.keep = merge;
@@ -2106,11 +2177,12 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- const struct drm_gpuvm_map_req *req)
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
{
struct drm_gem_object *req_obj = req->map.gem.obj;
+ const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
struct drm_gpuva *va, *next;
-
u64 req_offset = req->map.gem.offset;
u64 req_range = req->map.va.range;
u64 req_addr = req->map.va.addr;
@@ -2128,19 +2200,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 end = addr + range;
bool merge = !!va->gem.obj;
+ if (madvise && obj)
+ continue;
+
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
continue;
@@ -2161,6 +2236,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr < req_addr) {
@@ -2181,6 +2259,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
@@ -2188,6 +2269,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = req_addr,
+ .map.va.range = end - req_addr,
+ };
+
+ ret = op_map_cb(ops, priv, &map_req);
+ if (ret)
+ return ret;
+ }
+
continue;
}
@@ -2203,6 +2296,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr > req_addr) {
@@ -2211,16 +2307,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req_addr);
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
continue;
}
@@ -2239,12 +2337,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = req_end - addr,
+ };
+
+ return op_map_cb(ops, priv, &map_req);
+ }
break;
}
}
}
-
- return op_map_cb(ops, priv, req);
+ return op_map_cb(ops, priv, op_map);
}
static int
@@ -2296,7 +2402,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
if (ret)
return ret;
} else {
- ret = op_unmap_cb(ops, priv, va, false);
+ ret = op_unmap_cb(ops, priv, va, false, false);
if (ret)
return ret;
}
@@ -2345,7 +2451,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2483,7 +2589,7 @@ drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
return ret;
}
- return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req);
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2602,6 +2708,38 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
.sm_step_unmap = drm_gpuva_sm_step,
};
+static struct drm_gpuva_ops *
+__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
+{
+ struct drm_gpuva_ops *ops;
+ struct {
+ struct drm_gpuvm *vm;
+ struct drm_gpuva_ops *ops;
+ } args;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (unlikely(!ops))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ args.vm = gpuvm;
+ args.ops = ops;
+
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
+ if (ret)
+ goto err_free_ops;
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(gpuvm, ops);
+ return ERR_PTR(ret);
+}
+
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
@@ -2635,34 +2773,47 @@ struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_map_req *req)
{
- struct drm_gpuva_ops *ops;
- struct {
- struct drm_gpuvm *vm;
- struct drm_gpuva_ops *ops;
- } args;
- int ret;
-
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
- if (unlikely(!ops))
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&ops->list);
-
- args.vm = gpuvm;
- args.ops = ops;
-
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
- if (ret)
- goto err_free_ops;
-
- return ops;
-
-err_free_ops:
- drm_gpuva_ops_free(gpuvm, ops);
- return ERR_PTR(ret);
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
+/**
+ * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @req: map request arguments
+ *
+ * This function creates a list of operations to perform splitting
+ * of existent mapping(s) at start or end, based on the request map.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map and remap operations, but it
+ * also can be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There will be no unmap operations, a maximum of two remap operations and two
+ * map operations. The two map operations correspond to: one from start to the
+ * end of the drm_gpuva containing start, and another from the start of the
+ * drm_gpuva containing end to end.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req)
+{
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
+
/**
* drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 05347ac6cc73..4a22b9d848f7 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1062,6 +1062,9 @@ struct drm_gpuvm_map_req {
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_map_req *req);
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
--
2.34.1
* Re: [PATCH v7 01/24] drm/gpuvm: Pass map arguments through a struct
From: Danilo Krummrich @ 2025-08-18 17:27 UTC
To: Himal Prasad Ghimiray
Cc: intel-xe, Matthew Brost, Thomas Hellström, Boris Brezillon,
Brendan King, Boris Brezillon, Caterina Shablia, Rob Clark,
dri-devel
On Thu Aug 14, 2025 at 12:59 PM CEST, Himal Prasad Ghimiray wrote:
> From: Boris Brezillon <boris.brezillon@collabora.com>
>
> We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
> so, before we do that, let's pass arguments through a struct instead
> of changing each call site every time a new optional argument is added.
>
> v5
> - Use drm_gpuva_op_map, same as drm_gpuvm_map_req
> - Rebase changes for drm_gpuvm_sm_map_exec_lock()
> - Fix kernel-docs
>
> v6
> - Use drm_gpuvm_map_req (Danilo/Matt)
>
> v7
> - change member name to map instead of op_map
> - use local variable to minimize the code changes in _sm_map
I know it's a thing in DRM, but I'd rather not have the version changes in the
commit message. Please move them below "---" or into the cover letter.
(No need to resend for this. :)
> Cc: Danilo Krummrich <dakr@kernel.org>
> Cc: Brendan King <Brendan.King@imgtec.com>
> Cc: Boris Brezillon <bbrezillon@kernel.org>
> Cc: Caterina Shablia <caterina.shablia@collabora.com>
> Cc: Rob Clark <robin.clark@oss.qualcomm.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Cc: <dri-devel@lists.freedesktop.org>
> Co-developed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
> Acked-by: Danilo Krummrich <dakr@kernel.org> #v4
> Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
* Re: [PATCH v7 03/24] drm/gpuvm: Introduce drm_gpuvm_madvise_ops_create
From: Danilo Krummrich @ 2025-08-18 17:32 UTC
To: Himal Prasad Ghimiray
Cc: intel-xe, Matthew Brost, Thomas Hellström, Boris Brezillon,
dri-devel
On Thu Aug 14, 2025 at 12:59 PM CEST, Himal Prasad Ghimiray wrote:
> These ops are used to iterate over GPUVAs in the user-provided range
> and split existing sparse VMAs if the start or end of the input
> range lies within one. The operations can create up to 2 REMAPs and 2 MAPs.
>
> The primary use case is for drivers to assign attributes to GPU VAs in
> the specified range without performing unmaps or merging mappings,
> supporting fine-grained control over sparse VAs.
>
> v2
> - use drm_gpuvm_sm_map_ops_create with flags instead of defining new
> ops_create (Danilo)
> - Add doc (Danilo)
>
> v3
> - Fix doc
> - Fix unmapping check
>
> v4
> - Fix mapping for non madvise ops
>
> v5
> - Fix mapping (Matthew Brost)
> - Rebase on top of struct changes
>
> v6
> - flag moved to map_req
>
> v7 (Danilo)
> - Use different functions
> - Add kernel-doc
> - Modify op_unmap_cb and op_map_cb to handle madvise and NULL ptr
> - use gem_obj check in single place
(Same comment as in patch 1.)
> Cc: Danilo Krummrich <dakr@kernel.org>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Cc: Boris Brezillon <bbrezillon@kernel.org>
> Cc: <dri-devel@lists.freedesktop.org>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
This looks pretty clean now, and thanks for adding the documentation.
Acked-by: Danilo Krummrich <dakr@kernel.org>
Feel free to take this through the Xe tree if necessary.
* Re: [PATCH v7 01/24] drm/gpuvm: Pass map arguments through a struct
From: Ghimiray, Himal Prasad @ 2025-08-18 18:12 UTC
To: Danilo Krummrich
Cc: intel-xe, Matthew Brost, Thomas Hellström, Boris Brezillon,
Brendan King, Boris Brezillon, Caterina Shablia, Rob Clark,
dri-devel
On 18-08-2025 22:57, Danilo Krummrich wrote:
> On Thu Aug 14, 2025 at 12:59 PM CEST, Himal Prasad Ghimiray wrote:
>> From: Boris Brezillon <boris.brezillon@collabora.com>
>>
>> We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
>> so, before we do that, let's pass arguments through a struct instead
>> of changing each call site every time a new optional argument is added.
>>
>> v5
>> - Use drm_gpuva_op_map, same as drm_gpuvm_map_req
>> - Rebase changes for drm_gpuvm_sm_map_exec_lock()
>> - Fix kernel-docs
>>
>> v6
>> - Use drm_gpuvm_map_req (Danilo/Matt)
>>
>> v7
>> - change member name to map instead of op_map
>> - use local variable to minimize the code changes in _sm_map
>
> I know it's a thing in DRM, but I'd rather not have the version changes in the
> commit message. Please move them below "---" or into the cover letter.
>
> (No need to resend for this. :)
Sure. Thanks
>
>> Cc: Danilo Krummrich <dakr@kernel.org>
>> Cc: Brendan King <Brendan.King@imgtec.com>
>> Cc: Boris Brezillon <bbrezillon@kernel.org>
>> Cc: Caterina Shablia <caterina.shablia@collabora.com>
>> Cc: Rob Clark <robin.clark@oss.qualcomm.com>
>> Cc: Matthew Brost <matthew.brost@intel.com>
>> Cc: <dri-devel@lists.freedesktop.org>
>> Co-developed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
>> Acked-by: Danilo Krummrich <dakr@kernel.org> #v4
>> Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
* Re: [PATCH v7 03/24] drm/gpuvm: Introduce drm_gpuvm_madvise_ops_create
From: Ghimiray, Himal Prasad @ 2025-08-18 18:16 UTC
To: Danilo Krummrich
Cc: intel-xe, Matthew Brost, Thomas Hellström, Boris Brezillon,
dri-devel
On 18-08-2025 23:02, Danilo Krummrich wrote:
> On Thu Aug 14, 2025 at 12:59 PM CEST, Himal Prasad Ghimiray wrote:
>> These ops are used to iterate over GPUVAs in the user-provided range
>> and split existing sparse VMAs if the start or end of the input
>> range lies within one. The operations can create up to 2 REMAPs and 2 MAPs.
>>
>> The primary use case is for drivers to assign attributes to GPU VAs in
>> the specified range without performing unmaps or merging mappings,
>> supporting fine-grained control over sparse VAs.
>>
>> v2
>> - use drm_gpuvm_sm_map_ops_create with flags instead of defining new
>> ops_create (Danilo)
>> - Add doc (Danilo)
>>
>> v3
>> - Fix doc
>> - Fix unmapping check
>>
>> v4
>> - Fix mapping for non madvise ops
>>
>> v5
>> - Fix mapping (Matthew Brost)
>> - Rebase on top of struct changes
>>
>> v6
>> - flag moved to map_req
>>
>> v7 (Danilo)
>> - Use different functions
>> - Add kernel-doc
>> - Modify op_unmap_cb and op_map_cb to handle madvise and NULL ptr
>> - use gem_obj check in single place
>
> (Same comment as in patch 1.)
>
>> Cc: Danilo Krummrich <dakr@kernel.org>
>> Cc: Matthew Brost <matthew.brost@intel.com>
>> Cc: Boris Brezillon <bbrezillon@kernel.org>
>> Cc: <dri-devel@lists.freedesktop.org>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
>
> This looks pretty clean now, and thanks for adding the documentation.
>
> Acked-by: Danilo Krummrich <dakr@kernel.org>
>
> Feel free to take this through the Xe tree if necessary.
Thanks
* Re: [PATCH v7 00/24] MADVISE FOR XE
From: Rodrigo Vivi @ 2025-08-18 20:28 UTC
To: Himal Prasad Ghimiray, dri-devel
Cc: intel-xe, Matthew Brost, Thomas Hellström
On Thu, Aug 14, 2025 at 04:29:26PM +0530, Himal Prasad Ghimiray wrote:
> -v8
> Define new function in gpuvm for madvise
> Fix tile check in ops_create
Please re-send this series. But this time, ensure that dri-devel is
in the --to= of the series!
In 8 revisions, it looks like not a single time it was sent completely
to dri-devel. Big changes like this need to be pretty visible.
Make sure to get all the appropriate acks on all the relevant patches as
well after resending to dri-devel.
Thanks,
Rodrigo.
>
> -v7
> Change gpuvm layering on gpuvm_map_req struct
> Fix EAGAIN return on garbage collector splitting vma
>
> -v6
> Rebase on gpuvm patches
> Address review comments
>
> -v5
> Restore attributes to default after free from userspace
> Add defragment worker to merge cpu mirror vma with default attributes
> Avoid using VMA in uapi
> address review comments
>
> -v4:
> fix atomic policies
> fix attribute copy
> address review comments
>
> Provides a user API to assign attributes like pat_index, atomic
> operation type, and preferred location for SVM ranges.
> The Kernel Mode Driver (KMD) may split existing VMAs to cover input
> ranges, assign user-provided attributes, and invalidate existing PTEs so
> that the next page fault/prefetch can use the new attributes.
>
> Boris Brezillon (2):
> drm/gpuvm: Pass map arguments through a struct
> drm/gpuvm: Kill drm_gpuva_init()
>
> Himal Prasad Ghimiray (22):
> drm/gpuvm: Introduce drm_gpuvm_madvise_ops_create
> drm/xe/uapi: Add madvise interface
> drm/xe/vm: Add attributes struct as member of vma
> drm/xe/vma: Move pat_index to vma attributes
> drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as
> parameter
> drm/gpusvm: Make drm_gpusvm_for_each_* macros public
> drm/xe/svm: Split system allocator vma incase of madvise call
> drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for
> madvise
> drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
> drm/xe: Implement madvise ioctl for xe
> drm/xe/svm: Add svm ranges migration policy on atomic access
> drm/xe/madvise: Update migration policy based on preferred location
> drm/xe/svm: Support DRM_XE_SVM_MEM_RANGE_ATTR_PAT memory attribute
> drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch
> drm/xe/svm: Consult madvise preferred location in prefetch
> drm/xe/bo: Add attributes field to xe_bo
> drm/xe/bo: Update atomic_access attribute on madvise
> drm/xe/madvise: Skip vma invalidation if mem attr are unchanged
> drm/xe/vm: Add helper to check for default VMA memory attributes
> drm/xe: Reset VMA attributes to default in SVM garbage collector
> drm/xe: Enable madvise ioctl for xe
> drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
>
> drivers/gpu/drm/drm_gpusvm.c | 122 ++-----
> drivers/gpu/drm/drm_gpuvm.c | 287 ++++++++++++----
> drivers/gpu/drm/imagination/pvr_vm.c | 15 +-
> drivers/gpu/drm/msm/msm_gem_vma.c | 33 +-
> drivers/gpu/drm/nouveau/nouveau_uvmm.c | 11 +-
> drivers/gpu/drm/panthor/panthor_mmu.c | 13 +-
> drivers/gpu/drm/xe/Makefile | 1 +
> drivers/gpu/drm/xe/xe_bo.c | 29 +-
> drivers/gpu/drm/xe/xe_bo_types.h | 8 +
> drivers/gpu/drm/xe/xe_device.c | 4 +
> drivers/gpu/drm/xe/xe_gt_pagefault.c | 35 +-
> drivers/gpu/drm/xe/xe_pt.c | 39 ++-
> drivers/gpu/drm/xe/xe_svm.c | 254 ++++++++++++--
> drivers/gpu/drm/xe/xe_svm.h | 23 ++
> drivers/gpu/drm/xe/xe_vm.c | 438 ++++++++++++++++++++++--
> drivers/gpu/drm/xe/xe_vm.h | 10 +-
> drivers/gpu/drm/xe/xe_vm_madvise.c | 445 +++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm_madvise.h | 15 +
> drivers/gpu/drm/xe/xe_vm_types.h | 57 +++-
> include/drm/drm_gpusvm.h | 70 ++++
> include/drm/drm_gpuvm.h | 38 ++-
> include/uapi/drm/xe_drm.h | 274 +++++++++++++++
> 22 files changed, 1922 insertions(+), 299 deletions(-)
> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
>
> --
> 2.34.1
>
* Re: [PATCH v7 01/24] drm/gpuvm: Pass map arguments through a struct
From: Rob Clark @ 2025-08-18 20:49 UTC
To: Himal Prasad Ghimiray
Cc: intel-xe, Matthew Brost, Thomas Hellström, Boris Brezillon,
Danilo Krummrich, Brendan King, Boris Brezillon, Caterina Shablia,
dri-devel
On Thu, Aug 14, 2025 at 3:32 AM Himal Prasad Ghimiray
<himal.prasad.ghimiray@intel.com> wrote:
>
> From: Boris Brezillon <boris.brezillon@collabora.com>
>
> We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
> so, before we do that, let's pass arguments through a struct instead
> of changing each call site every time a new optional argument is added.
>
> v5
> - Use drm_gpuva_op_map, same as drm_gpuvm_map_req
> - Rebase changes for drm_gpuvm_sm_map_exec_lock()
> - Fix kernel-docs
>
> v6
> - Use drm_gpuvm_map_req (Danilo/Matt)
>
> v7
> - change member name to map instead of op_map
> - use local variable to minimize the code changes in _sm_map
>
> Cc: Danilo Krummrich <dakr@kernel.org>
> Cc: Brendan King <Brendan.King@imgtec.com>
> Cc: Boris Brezillon <bbrezillon@kernel.org>
> Cc: Caterina Shablia <caterina.shablia@collabora.com>
> Cc: Rob Clark <robin.clark@oss.qualcomm.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Cc: <dri-devel@lists.freedesktop.org>
> Co-developed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
> Acked-by: Danilo Krummrich <dakr@kernel.org> #v4
> Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
> ---
> drivers/gpu/drm/drm_gpuvm.c | 74 +++++++++++---------------
> drivers/gpu/drm/imagination/pvr_vm.c | 15 ++++--
> drivers/gpu/drm/msm/msm_gem_vma.c | 25 +++++++--
> drivers/gpu/drm/nouveau/nouveau_uvmm.c | 11 ++--
> drivers/gpu/drm/panthor/panthor_mmu.c | 13 +++--
> drivers/gpu/drm/xe/xe_vm.c | 13 +++--
> include/drm/drm_gpuvm.h | 20 ++++---
> 7 files changed, 102 insertions(+), 69 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> index bbc7fecb6f4a..6c18cec70f11 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -486,13 +486,18 @@
> * u64 addr, u64 range,
> * struct drm_gem_object *obj, u64 offset)
> * {
> + * struct drm_gpuvm_map_req map_req = {
> + * .map.va.addr = addr,
> + * .map.va.range = range,
> + * .map.gem.obj = obj,
> + * .map.gem.offset = offset,
> + * };
> * struct drm_gpuva_ops *ops;
> * struct drm_gpuva_op *op
> * struct drm_gpuvm_bo *vm_bo;
> *
> * driver_lock_va_space();
> - * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
> - * obj, offset);
> + * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
> * if (IS_ERR(ops))
> * return PTR_ERR(ops);
> *
> @@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
>
> static int
> op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
> - u64 addr, u64 range,
> - struct drm_gem_object *obj, u64 offset)
> + const struct drm_gpuvm_map_req *req)
> {
> struct drm_gpuva_op op = {};
>
> op.op = DRM_GPUVA_OP_MAP;
> - op.map.va.addr = addr;
> - op.map.va.range = range;
> - op.map.gem.obj = obj;
> - op.map.gem.offset = offset;
> + op.map.va.addr = req->map.va.addr;
> + op.map.va.range = req->map.va.range;
> + op.map.gem.obj = req->map.gem.obj;
> + op.map.gem.offset = req->map.gem.offset;
>
> return fn->sm_step_map(&op, priv);
> }
> @@ -2102,10 +2106,14 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
> static int
> __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> const struct drm_gpuvm_ops *ops, void *priv,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + const struct drm_gpuvm_map_req *req)
> {
> + struct drm_gem_object *req_obj = req->map.gem.obj;
> struct drm_gpuva *va, *next;
> +
> + u64 req_offset = req->map.gem.offset;
> + u64 req_range = req->map.va.range;
> + u64 req_addr = req->map.va.addr;
> u64 req_end = req_addr + req_range;
> int ret;
>
> @@ -2236,9 +2244,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> }
> }
>
> - return op_map_cb(ops, priv,
> - req_addr, req_range,
> - req_obj, req_offset);
> + return op_map_cb(ops, priv, req);
> }
>
> static int
> @@ -2303,10 +2309,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
> * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
> * @gpuvm: the &drm_gpuvm representing the GPU VA space
> * @priv: pointer to a driver private data structure
> - * @req_addr: the start address of the new mapping
> - * @req_range: the range of the new mapping
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @req: ptr to struct drm_gpuvm_map_req
> *
> * This function iterates the given range of the GPU VA space. It utilizes the
> * &drm_gpuvm_ops to call back into the driver providing the split and merge
> @@ -2333,8 +2336,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
> */
> int
> drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + const struct drm_gpuvm_map_req *req)
> {
> const struct drm_gpuvm_ops *ops = gpuvm->ops;
>
> @@ -2343,9 +2345,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> ops->sm_step_unmap)))
> return -EINVAL;
>
> - return __drm_gpuvm_sm_map(gpuvm, ops, priv,
> - req_addr, req_range,
> - req_obj, req_offset);
> + return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
> }
> EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
>
> @@ -2421,10 +2421,7 @@ static const struct drm_gpuvm_ops lock_ops = {
> * @gpuvm: the &drm_gpuvm representing the GPU VA space
> * @exec: the &drm_exec locking context
> * @num_fences: for newly mapped objects, the # of fences to reserve
> - * @req_addr: the start address of the range to unmap
> - * @req_range: the range of the mappings to unmap
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @req: ptr to struct drm_gpuvm_map_req
> *
> * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
> * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
> @@ -2445,9 +2442,7 @@ static const struct drm_gpuvm_ops lock_ops = {
> * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
> * break;
> * case DRIVER_OP_MAP:
> - * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
> - * op->addr, op->range,
> - * obj, op->obj_offset);
> + * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
> * break;
> * }
> *
> @@ -2478,18 +2473,17 @@ static const struct drm_gpuvm_ops lock_ops = {
> int
> drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
> struct drm_exec *exec, unsigned int num_fences,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + struct drm_gpuvm_map_req *req)
> {
> + struct drm_gem_object *req_obj = req->map.gem.obj;
> +
> if (req_obj) {
> int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
> if (ret)
> return ret;
> }
>
> - return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
> - req_addr, req_range,
> - req_obj, req_offset);
> + return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req);
>
> }
> EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
> @@ -2611,10 +2605,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
> /**
> * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
> * @gpuvm: the &drm_gpuvm representing the GPU VA space
> - * @req_addr: the start address of the new mapping
> - * @req_range: the range of the new mapping
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @req: map request arguments
> *
> * This function creates a list of operations to perform splitting and merging
> * of existent mapping(s) with the newly requested one.
> @@ -2642,8 +2633,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
> */
> struct drm_gpuva_ops *
> drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + const struct drm_gpuvm_map_req *req)
> {
> struct drm_gpuva_ops *ops;
> struct {
> @@ -2661,9 +2651,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> args.vm = gpuvm;
> args.ops = ops;
>
> - ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
> - req_addr, req_range,
> - req_obj, req_offset);
> + ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
> if (ret)
> goto err_free_ops;
>
> diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
> index 2896fa7501b1..3d97990170bf 100644
> --- a/drivers/gpu/drm/imagination/pvr_vm.c
> +++ b/drivers/gpu/drm/imagination/pvr_vm.c
> @@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
> static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
> {
> switch (bind_op->type) {
> - case PVR_VM_BIND_TYPE_MAP:
> + case PVR_VM_BIND_TYPE_MAP: {
> + const struct drm_gpuvm_map_req map_req = {
> + .map.va.addr = bind_op->device_addr,
> + .map.va.range = bind_op->size,
> + .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
> + .map.gem.offset = bind_op->offset,
> + };
> +
> return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
> - bind_op, bind_op->device_addr,
> - bind_op->size,
> - gem_from_pvr_gem(bind_op->pvr_obj),
> - bind_op->offset);
> + bind_op, &map_req);
> + }
>
> case PVR_VM_BIND_TYPE_UNMAP:
> return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
> diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
> index 3cd8562a5109..e106428369ef 100644
> --- a/drivers/gpu/drm/msm/msm_gem_vma.c
> +++ b/drivers/gpu/drm/msm/msm_gem_vma.c
> @@ -1172,10 +1172,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
> break;
> case MSM_VM_BIND_OP_MAP:
> case MSM_VM_BIND_OP_MAP_NULL:
> - ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
> - op->iova, op->range,
> - op->obj, op->obj_offset);
> + {
nit (here and below), I'd prefer the opening brace on the same line as
the case statement (no need to resend for that, you can fixup when you
apply)
Reviewed-by: Rob Clark <robin.clark@oss.qualcomm.com>
> + struct drm_gpuvm_map_req map_req = {
> + .map.va.addr = op->iova,
> + .map.va.range = op->range,
> + .map.gem.obj = op->obj,
> + .map.gem.offset = op->obj_offset,
> + };
> +
> + ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
> break;
> + }
> default:
> /*
> * lookup_op() should have already thrown an error for
> @@ -1283,9 +1290,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
> arg.flags |= MSM_VMA_DUMP;
> fallthrough;
> case MSM_VM_BIND_OP_MAP_NULL:
> - ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
> - op->range, op->obj, op->obj_offset);
> + {
> + struct drm_gpuvm_map_req map_req = {
> + .map.va.addr = op->iova,
> + .map.va.range = op->range,
> + .map.gem.obj = op->obj,
> + .map.gem.offset = op->obj_offset,
> + };
> +
> + ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
> break;
> + }
> default:
> /*
> * lookup_op() should have already thrown an error for
> diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> index ddfc46bc1b3e..d94a85509176 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> @@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
> break;
> case OP_MAP: {
> struct nouveau_uvma_region *reg;
> + struct drm_gpuvm_map_req map_req = {
> + .map.va.addr = op->va.addr,
> + .map.va.range = op->va.range,
> + .map.gem.obj = op->gem.obj,
> + .map.gem.offset = op->gem.offset,
> + };
>
> reg = nouveau_uvma_region_find_first(uvmm,
> op->va.addr,
> @@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
> }
>
> op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
> - op->va.addr,
> - op->va.range,
> - op->gem.obj,
> - op->gem.offset);
> + &map_req);
> if (IS_ERR(op->ops)) {
> ret = PTR_ERR(op->ops);
> goto unwind_continue;
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 4140f697ba5a..e3cdaa73fd38 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -2169,15 +2169,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
> mutex_lock(&vm->op_lock);
> vm->op_ctx = op;
> switch (op_type) {
> - case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
> + case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
> + const struct drm_gpuvm_map_req map_req = {
> + .map.va.addr = op->va.addr,
> + .map.va.range = op->va.range,
> + .map.gem.obj = op->map.vm_bo->obj,
> + .map.gem.offset = op->map.bo_offset,
> + };
> +
> if (vm->unusable) {
> ret = -EINVAL;
> break;
> }
>
> - ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
> - op->map.vm_bo->obj, op->map.bo_offset);
> + ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
> break;
> + }
>
> case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
> ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index d40d2d43c041..15359ee738e6 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2337,10 +2337,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>
> switch (operation) {
> case DRM_XE_VM_BIND_OP_MAP:
> - case DRM_XE_VM_BIND_OP_MAP_USERPTR:
> - ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
> - obj, bo_offset_or_userptr);
> + case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
> + struct drm_gpuvm_map_req map_req = {
> + .map.va.addr = addr,
> + .map.va.range = range,
> + .map.gem.obj = obj,
> + .map.gem.offset = bo_offset_or_userptr,
> + };
> +
> + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
> break;
> + }
> case DRM_XE_VM_BIND_OP_UNMAP:
> ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
> break;
> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> index 274532facfd6..a9fa44148e0c 100644
> --- a/include/drm/drm_gpuvm.h
> +++ b/include/drm/drm_gpuvm.h
> @@ -1058,10 +1058,20 @@ struct drm_gpuva_ops {
> */
> #define drm_gpuva_next_op(op) list_next_entry(op, entry)
>
> +/**
> + * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
> + */
> +struct drm_gpuvm_map_req {
> + /**
> + * @map: struct drm_gpuva_op_map
> + */
> + struct drm_gpuva_op_map map;
> +};
> +
> struct drm_gpuva_ops *
> drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> - u64 addr, u64 range,
> - struct drm_gem_object *obj, u64 offset);
> + const struct drm_gpuvm_map_req *req);
> +
> struct drm_gpuva_ops *
> drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
> u64 addr, u64 range);
> @@ -1205,16 +1215,14 @@ struct drm_gpuvm_ops {
> };
>
> int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> - u64 addr, u64 range,
> - struct drm_gem_object *obj, u64 offset);
> + const struct drm_gpuvm_map_req *req);
>
> int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
> u64 addr, u64 range);
>
> int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
> struct drm_exec *exec, unsigned int num_fences,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *obj, u64 offset);
> + struct drm_gpuvm_map_req *req);
>
> int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
> u64 req_addr, u64 req_range);
> --
> 2.34.1
>