* [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
[not found] <20250730130050.1001648-1-himal.prasad.ghimiray@intel.com>
@ 2025-07-30 13:00 ` Himal Prasad Ghimiray
2025-07-30 23:23 ` kernel test robot
` (2 more replies)
2025-07-30 13:00 ` [PATCH v5 04/25] drm/gpuvm: Introduce DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE flag Himal Prasad Ghimiray
1 sibling, 3 replies; 10+ messages in thread
From: Himal Prasad Ghimiray @ 2025-07-30 13:00 UTC (permalink / raw)
To: intel-xe
Cc: Matthew Brost, Thomas Hellström, Boris Brezillon,
Danilo Krummrich, Boris Brezillon, Caterina Shablia, Rob Clark,
dri-devel, Danilo Krummrich, Himal Prasad Ghimiray
From: Boris Brezillon <boris.brezillon@collabora.com>
We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
so, before we do that, let's pass arguments through a struct instead
of changing each call site every time a new optional argument is added.
v5
- Use drm_gpuva_op_map—same as drm_gpuvm_map_req (Danilo)
- Rebase changes for drm_gpuvm_sm_map_exec_lock()
- Fix kernel-docs
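For illustration, a typical call site changes roughly like this (a
sketch distilled from the hunks below):

	/* Before: every new optional argument touched this signature. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);

	/* After: the arguments travel in a struct drm_gpuva_op_map. */
	struct drm_gpuva_op_map map_req = {
		.va.addr = addr,
		.va.range = range,
		.gem.obj = obj,
		.gem.offset = offset,
	};

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);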
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Boris Brezillon <bbrezillon@kernel.org>
Cc: Caterina Shablia <caterina.shablia@collabora.com>
Cc: Rob Clark <robin.clark@oss.qualcomm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Acked-by: Danilo Krummrich <dakr@kernel.org> (#v4)
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Caterina Shablia <caterina.shablia@collabora.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/drm_gpuvm.c | 106 ++++++++++---------------
drivers/gpu/drm/imagination/pvr_vm.c | 15 ++--
drivers/gpu/drm/msm/msm_gem_vma.c | 33 ++++++--
drivers/gpu/drm/nouveau/nouveau_uvmm.c | 11 ++-
drivers/gpu/drm/panthor/panthor_mmu.c | 13 ++-
drivers/gpu/drm/xe/xe_vm.c | 13 ++-
include/drm/drm_gpuvm.h | 10 +--
7 files changed, 112 insertions(+), 89 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index bbc7fecb6f4a..f04d80a3a63b 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -486,13 +486,18 @@
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
+ * struct drm_gpuva_op_map op_map = {
+ * .va.addr = addr,
+ * .va.range = range,
+ * .gem.obj = obj,
+ * .gem.offset = offset,
+ * };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- * obj, offset);
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &op_map);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
+ const struct drm_gpuva_op_map *req)
{
struct drm_gpuva_op op = {};
op.op = DRM_GPUVA_OP_MAP;
- op.map.va.addr = addr;
- op.map.va.range = range;
- op.map.gem.obj = obj;
- op.map.gem.offset = offset;
+ op.map.va.addr = req->va.addr;
+ op.map.va.range = req->va.range;
+ op.map.gem.obj = req->gem.obj;
+ op.map.gem.offset = req->gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2102,17 +2106,16 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuva_op_map *req)
{
struct drm_gpuva *va, *next;
- u64 req_end = req_addr + req_range;
+ u64 req_end = req->va.addr + req->va.range;
int ret;
- if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
+ if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
return -EINVAL;
- drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
+ drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req->va.addr, req_end) {
struct drm_gem_object *obj = va->gem.obj;
u64 offset = va->gem.offset;
u64 addr = va->va.addr;
@@ -2120,9 +2123,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 end = addr + range;
bool merge = !!va->gem.obj;
- if (addr == req_addr) {
- merge &= obj == req_obj &&
- offset == req_offset;
+ if (addr == req->va.addr) {
+ merge &= obj == req->gem.obj &&
+ offset == req->gem.offset;
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
@@ -2141,9 +2144,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
if (end > req_end) {
struct drm_gpuva_op_map n = {
.va.addr = req_end,
- .va.range = range - req_range,
+ .va.range = range - req->va.range,
.gem.obj = obj,
- .gem.offset = offset + req_range,
+ .gem.offset = offset + req->va.range,
};
struct drm_gpuva_op_unmap u = {
.va = va,
@@ -2155,8 +2158,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
return ret;
break;
}
- } else if (addr < req_addr) {
- u64 ls_range = req_addr - addr;
+ } else if (addr < req->va.addr) {
+ u64 ls_range = req->va.addr - addr;
struct drm_gpuva_op_map p = {
.va.addr = addr,
.va.range = ls_range,
@@ -2165,8 +2168,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
};
struct drm_gpuva_op_unmap u = { .va = va };
- merge &= obj == req_obj &&
- offset + ls_range == req_offset;
+ merge &= obj == req->gem.obj &&
+ offset + ls_range == req->gem.offset;
u.keep = merge;
if (end == req_end) {
@@ -2189,7 +2192,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
.va.range = end - req_end,
.gem.obj = obj,
.gem.offset = offset + ls_range +
- req_range,
+ req->va.range,
};
ret = op_remap_cb(ops, priv, &p, &n, &u);
@@ -2197,10 +2200,10 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
return ret;
break;
}
- } else if (addr > req_addr) {
- merge &= obj == req_obj &&
- offset == req_offset +
- (addr - req_addr);
+ } else if (addr > req->va.addr) {
+ merge &= obj == req->gem.obj &&
+ offset == req->gem.offset +
+ (addr - req->va.addr);
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
@@ -2236,9 +2239,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
}
}
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return op_map_cb(ops, priv, req);
}
static int
@@ -2303,10 +2304,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to drm_gpuva_op_map struct
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2331,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuva_op_map *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2343,9 +2340,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2421,10 +2416,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @exec: the &drm_exec locking context
* @num_fences: for newly mapped objects, the # of fences to reserve
- * @req_addr: the start address of the range to unmap
- * @req_range: the range of the mappings to unmap
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @op: ptr to drm_gpuva_op_map struct
*
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2442,12 +2434,10 @@ static const struct drm_gpuvm_ops lock_ops = {
* for_each_vm_bind_operation {
* switch (op->op) {
* case DRIVER_OP_UNMAP:
- * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
+ * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->va.addr, op->va.range);
* break;
* case DRIVER_OP_MAP:
- * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
- * op->addr, op->range,
- * obj, op->obj_offset);
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, op);
* break;
* }
*
@@ -2478,18 +2468,16 @@ static const struct drm_gpuvm_ops lock_ops = {
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ struct drm_gpuva_op_map *req)
{
- if (req_obj) {
- int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
+ if (req->gem.obj) {
+ int ret = drm_exec_prepare_obj(exec, req->gem.obj, num_fences);
if (ret)
return ret;
}
return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
- req_addr, req_range,
- req_obj, req_offset);
+ req);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2611,10 +2599,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to drm_gpuva_op_map struct
*
* This function creates a list of operations to perform splitting and merging
* of existent mapping(s) with the newly requested one.
@@ -2642,8 +2627,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuva_op_map *req)
{
struct drm_gpuva_ops *ops;
struct {
@@ -2661,9 +2645,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
args.vm = gpuvm;
args.ops = ops;
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
- req_obj, req_offset);
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
if (ret)
goto err_free_ops;
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 2896fa7501b1..57116709de81 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
switch (bind_op->type) {
- case PVR_VM_BIND_TYPE_MAP:
+ case PVR_VM_BIND_TYPE_MAP: {
+ const struct drm_gpuva_op_map map_req = {
+ .va.addr = bind_op->device_addr,
+ .va.range = bind_op->size,
+ .gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
+ .gem.offset = bind_op->offset,
+ };
+
return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
- bind_op, bind_op->device_addr,
- bind_op->size,
- gem_from_pvr_gem(bind_op->pvr_obj),
- bind_op->offset);
+ bind_op, &map_req);
+ }
case PVR_VM_BIND_TYPE_UNMAP:
return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3cd8562a5109..59a9b41bc967 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -371,6 +371,12 @@ struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
u64 offset, u64 range_start, u64 range_end)
{
+ struct drm_gpuva_op_map op_map = {
+ .va.addr = range_start,
+ .va.range = range_end - range_start,
+ .gem.obj = obj,
+ .gem.offset = offset,
+ };
struct msm_gem_vm *vm = to_msm_vm(gpuvm);
struct drm_gpuvm_bo *vm_bo;
struct msm_gem_vma *vma;
@@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
if (obj)
GEM_WARN_ON((range_end - range_start) > obj->size);
- drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+ drm_gpuva_init_from_op(&vma->base, &op_map);
vma->mapped = false;
ret = drm_gpuva_insert(&vm->base, &vma->base);
@@ -1172,10 +1178,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
break;
case MSM_VM_BIND_OP_MAP:
case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
- op->iova, op->range,
- op->obj, op->obj_offset);
+ {
+ struct drm_gpuva_op_map map_req = {
+ .va.addr = op->iova,
+ .va.range = op->range,
+ .gem.obj = op->obj,
+ .gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
@@ -1283,9 +1296,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
arg.flags |= MSM_VMA_DUMP;
fallthrough;
case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
- op->range, op->obj, op->obj_offset);
+ {
+ struct drm_gpuva_op_map map_req = {
+ .va.addr = op->iova,
+ .va.range = op->range,
+ .gem.obj = op->obj,
+ .gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index ddfc46bc1b3e..b74054b0a476 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
+ struct drm_gpuva_op_map map_req = {
+ .va.addr = op->va.addr,
+ .va.range = op->va.range,
+ .gem.obj = op->gem.obj,
+ .gem.offset = op->gem.offset,
+ };
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
@@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
- op->va.addr,
- op->va.range,
- op->gem.obj,
- op->gem.offset);
+ &map_req);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 4140f697ba5a..5fd4245a57b9 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -2169,15 +2169,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
mutex_lock(&vm->op_lock);
vm->op_ctx = op;
switch (op_type) {
- case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
+ const struct drm_gpuva_op_map map_req = {
+ .va.addr = op->va.addr,
+ .va.range = op->va.range,
+ .gem.obj = op->map.vm_bo->obj,
+ .gem.offset = op->map.bo_offset,
+ };
+
if (vm->unusable) {
ret = -EINVAL;
break;
}
- ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
- op->map.vm_bo->obj, op->map.bo_offset);
+ ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
+ }
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 432ea325677d..4b3e78745363 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
switch (operation) {
case DRM_XE_VM_BIND_OP_MAP:
- case DRM_XE_VM_BIND_OP_MAP_USERPTR:
- ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
- obj, bo_offset_or_userptr);
+ case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
+ struct drm_gpuva_op_map map_req = {
+ .va.addr = addr,
+ .va.range = range,
+ .gem.obj = obj,
+ .gem.offset = bo_offset_or_userptr,
+ };
+
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
break;
+ }
case DRM_XE_VM_BIND_OP_UNMAP:
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
break;
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 274532facfd6..892ffe75a62f 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1060,8 +1060,8 @@ struct drm_gpuva_ops {
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuva_op_map *req);
+
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
@@ -1205,16 +1205,14 @@ struct drm_gpuvm_ops {
};
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuva_op_map *req);
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *obj, u64 offset);
+ struct drm_gpuva_op_map *req);
int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
u64 req_addr, u64 req_range);
--
2.34.1
* [PATCH v5 04/25] drm/gpuvm: Introduce DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE flag
[not found] <20250730130050.1001648-1-himal.prasad.ghimiray@intel.com>
2025-07-30 13:00 ` [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct Himal Prasad Ghimiray
@ 2025-07-30 13:00 ` Himal Prasad Ghimiray
2025-08-05 19:24 ` Matthew Brost
1 sibling, 1 reply; 10+ messages in thread
From: Himal Prasad Ghimiray @ 2025-07-30 13:00 UTC (permalink / raw)
To: intel-xe
Cc: Matthew Brost, Thomas Hellström, Himal Prasad Ghimiray,
Danilo Krummrich, Boris Brezillon, dri-devel
- DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE: This flag is used by
drm_gpuvm_sm_map_ops_create() to iterate over the GPU VMAs in the
user-provided range and split an existing non-GEM-object VMA if the
start or end of the input range lies within it. The resulting
operations can create up to 2 REMAPs and 2 MAPs. The intent is for the
Xe driver to use this to assign attributes to GPU VMAs within a
user-defined range. Unlike the default split/merge behavior of
drm_gpuvm_sm_map_ops_create(), operations created with this flag never
contain UNMAPs or merges, and may produce no operations at all (see
the sketch below).
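A minimal usage sketch (illustrative variable names; assumes the flags
field of struct drm_gpuva_op_map that this patch reads via req->flags):

	struct drm_gpuva_op_map map_req = {
		.va.addr = start,
		.va.range = length,
		.flags = DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE,
	};
	struct drm_gpuva_ops *ops;

	/* Returns only REMAPs/MAPs that split existing VMAs at
	 * 'start' and 'start + length'; never UNMAPs or merges,
	 * and possibly no ops at all if nothing needs splitting.
	 */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);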
v2
- use drm_gpuvm_sm_map_ops_create with flags instead of defining new
ops_create (Danilo)
- Add doc (Danilo)
v3
- Fix doc
- Fix unmapping check
v4
- Fix mapping for non madvise ops
v5
- Fix mapping (Matthew Brost)
- Rebase on top of struct changes
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Boris Brezillon <bbrezillon@kernel.org>
Cc: <dri-devel@lists.freedesktop.org>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/drm_gpuvm.c | 87 +++++++++++++++++++++++++++++++------
include/drm/drm_gpuvm.h | 11 ++++-
2 files changed, 83 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f04d80a3a63b..2aeae8c2296f 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2110,6 +2110,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
{
struct drm_gpuva *va, *next;
u64 req_end = req->va.addr + req->va.range;
+ bool is_madvise_ops = (req->flags & DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE);
+ bool needs_map = !is_madvise_ops;
int ret;
if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
@@ -2122,26 +2124,35 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 range = va->va.range;
u64 end = addr + range;
bool merge = !!va->gem.obj;
+ bool skip_madvise_ops = is_madvise_ops && merge;
+ needs_map = !is_madvise_ops;
if (addr == req->va.addr) {
merge &= obj == req->gem.obj &&
offset == req->gem.offset;
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
continue;
}
if (end > req_end) {
+ if (skip_madvise_ops)
+ break;
+
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = range - req->va.range,
@@ -2156,6 +2167,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops)
+ needs_map = true;
break;
}
} else if (addr < req->va.addr) {
@@ -2173,20 +2187,45 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u.keep = merge;
if (end == req_end) {
+ if (skip_madvise_ops)
+ break;
+
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops)
+ needs_map = true;
+
break;
}
if (end < req_end) {
+ if (skip_madvise_ops)
+ continue;
+
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops) {
+ struct drm_gpuva_op_map map_req = {
+ .va.addr = req->va.addr,
+ .va.range = end - req->va.addr,
+ };
+
+ ret = op_map_cb(ops, priv, &map_req);
+ if (ret)
+ return ret;
+ }
+
continue;
}
if (end > req_end) {
+ if (skip_madvise_ops)
+ break;
+
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = end - req_end,
@@ -2198,6 +2237,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops)
+ needs_map = true;
break;
}
} else if (addr > req->va.addr) {
@@ -2206,20 +2248,29 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req->va.addr);
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
+
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
+
continue;
}
if (end > req_end) {
+ if (skip_madvise_ops)
+ break;
+
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = end - req_end,
@@ -2234,12 +2285,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops) {
+ struct drm_gpuva_op_map map_req = {
+ .va.addr = addr,
+ .va.range = req_end - addr,
+ };
+
+ return op_map_cb(ops, priv, &map_req);
+ }
break;
}
}
}
-
- return op_map_cb(ops, priv, req);
+ return needs_map ? op_map_cb(ops, priv, req) : 0;
}
static int
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 75c616fdc119..a8e9f70501ef 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -811,10 +811,19 @@ enum drm_gpuva_op_type {
};
/** DOC: flags for struct drm_gpuva_op_map
- * %DRM_GPUVM_SM_MAP_OPS_FLAG_NONE DEFAULT split and merge,
+ * %DRM_GPUVM_SM_MAP_OPS_FLAG_NONE: DEFAULT split and merge,
* It cannot be combined with other flags.
+ *
+ * %DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE: This flag is used by
+ * drm_gpuvm_sm_map_ops_create() to iterate over GPU VMAs in the user-provided
+ * range and split an existing non-GEM-object VMA if the start or end of
+ * the input range lies within it. The operations can create up to 2 REMAPs
+ * and 2 MAPs. Unlike DRM_GPUVM_SM_MAP_OPS_FLAG_NONE, operations created with
+ * this flag never contain UNMAPs or merges, and may produce no final
+ * operations at all.
*/
#define DRM_GPUVM_SM_MAP_OPS_FLAG_NONE 0
+#define DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE BIT(0)
/**
* struct drm_gpuva_op_map - GPU VA map operation
--
2.34.1
* Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
2025-07-30 13:00 ` [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct Himal Prasad Ghimiray
@ 2025-07-30 23:23 ` kernel test robot
2025-08-05 3:56 ` Matthew Brost
2025-08-05 9:40 ` Danilo Krummrich
2 siblings, 0 replies; 10+ messages in thread
From: kernel test robot @ 2025-07-30 23:23 UTC (permalink / raw)
To: Himal Prasad Ghimiray, intel-xe
Cc: oe-kbuild-all, Matthew Brost, Thomas Hellström,
Boris Brezillon, Danilo Krummrich, Caterina Shablia, Rob Clark,
dri-devel, Himal Prasad Ghimiray
Hi Himal,
kernel test robot noticed the following build warnings:
[auto build test WARNING on next-20250730]
[cannot apply to drm-xe/drm-xe-next linus/master v6.16 v6.16-rc7 v6.16-rc6 v6.16]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Himal-Prasad-Ghimiray/drm-gpuvm-Pass-map-arguments-through-a-struct/20250731-003813
base: next-20250730
patch link: https://lore.kernel.org/r/20250730130050.1001648-2-himal.prasad.ghimiray%40intel.com
patch subject: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
config: i386-buildonly-randconfig-002-20250731 (https://download.01.org/0day-ci/archive/20250731/202507310715.d6MBnXvv-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14+deb12u1) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250731/202507310715.d6MBnXvv-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507310715.d6MBnXvv-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> Warning: drivers/gpu/drm/drm_gpuvm.c:2471 function parameter 'req' not described in 'drm_gpuvm_sm_map_exec_lock'
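The warning points at the stale kernel-doc parameter name above
drm_gpuvm_sm_map_exec_lock(); the likely one-line fix (a sketch, echoed
by the review below) is:

	- * @op: ptr to drm_gpuva_op_map struct
	+ * @req: ptr to drm_gpuva_op_map struct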
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
2025-07-30 13:00 ` [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct Himal Prasad Ghimiray
2025-07-30 23:23 ` kernel test robot
@ 2025-08-05 3:56 ` Matthew Brost
2025-08-05 5:24 ` Ghimiray, Himal Prasad
2025-08-05 9:40 ` Danilo Krummrich
2 siblings, 1 reply; 10+ messages in thread
From: Matthew Brost @ 2025-08-05 3:56 UTC (permalink / raw)
To: Himal Prasad Ghimiray
Cc: intel-xe, Thomas Hellström, Boris Brezillon,
Danilo Krummrich, Boris Brezillon, Caterina Shablia, Rob Clark,
dri-devel, Danilo Krummrich
On Wed, Jul 30, 2025 at 06:30:26PM +0530, Himal Prasad Ghimiray wrote:
> From: Boris Brezillon <boris.brezillon@collabora.com>
>
> We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
> so, before we do that, let's pass arguments through a struct instead
> of changing each call site every time a new optional argument is added.
>
> v5
> - Use drm_gpuva_op_map—same as drm_gpuvm_map_req (Danilo)
> - Rebase changes for drm_gpuvm_sm_map_exec_lock()
> - Fix kernel-docs
>
> Cc: Danilo Krummrich <dakr@redhat.com>
> Cc: Boris Brezillon <bbrezillon@kernel.org>
> Cc: Caterina Shablia <caterina.shablia@collabora.com>
> Cc: Rob Clark <robin.clark@oss.qualcomm.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Cc: <dri-devel@lists.freedesktop.org>
>
> Acked-by: Danilo Krummrich <dakr@kernel.org> (#v4)
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
> Signed-off-by: Caterina Shablia <caterina.shablia@collabora.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/drm_gpuvm.c | 106 ++++++++++---------------
> drivers/gpu/drm/imagination/pvr_vm.c | 15 ++--
> drivers/gpu/drm/msm/msm_gem_vma.c | 33 ++++++--
> drivers/gpu/drm/nouveau/nouveau_uvmm.c | 11 ++-
> drivers/gpu/drm/panthor/panthor_mmu.c | 13 ++-
> drivers/gpu/drm/xe/xe_vm.c | 13 ++-
> include/drm/drm_gpuvm.h | 10 +--
> 7 files changed, 112 insertions(+), 89 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> index bbc7fecb6f4a..f04d80a3a63b 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -486,13 +486,18 @@
> * u64 addr, u64 range,
> * struct drm_gem_object *obj, u64 offset)
> * {
> + * struct drm_gpuva_op_map op_map = {
> + * .va.addr = addr,
> + * .va.range = range,
> + * .gem.obj = obj,
> + * .gem.offset = offset,
> + * };
> * struct drm_gpuva_ops *ops;
> * struct drm_gpuva_op *op
> * struct drm_gpuvm_bo *vm_bo;
> *
> * driver_lock_va_space();
> - * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
> - * obj, offset);
> + * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &op_map);
> * if (IS_ERR(ops))
> * return PTR_ERR(ops);
> *
> @@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
>
> static int
> op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
> - u64 addr, u64 range,
> - struct drm_gem_object *obj, u64 offset)
> + const struct drm_gpuva_op_map *req)
> {
> struct drm_gpuva_op op = {};
>
> op.op = DRM_GPUVA_OP_MAP;
> - op.map.va.addr = addr;
> - op.map.va.range = range;
> - op.map.gem.obj = obj;
> - op.map.gem.offset = offset;
> + op.map.va.addr = req->va.addr;
> + op.map.va.range = req->va.range;
> + op.map.gem.obj = req->gem.obj;
> + op.map.gem.offset = req->gem.offset;
>
> return fn->sm_step_map(&op, priv);
> }
> @@ -2102,17 +2106,16 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
> static int
> __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> const struct drm_gpuvm_ops *ops, void *priv,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + const struct drm_gpuva_op_map *req)
> {
> struct drm_gpuva *va, *next;
> - u64 req_end = req_addr + req_range;
> + u64 req_end = req->va.addr + req->va.range;
> int ret;
>
> - if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
> + if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
> return -EINVAL;
>
> - drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
> + drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req->va.addr, req_end) {
> struct drm_gem_object *obj = va->gem.obj;
> u64 offset = va->gem.offset;
> u64 addr = va->va.addr;
> @@ -2120,9 +2123,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> u64 end = addr + range;
> bool merge = !!va->gem.obj;
>
> - if (addr == req_addr) {
> - merge &= obj == req_obj &&
> - offset == req_offset;
> + if (addr == req->va.addr) {
> + merge &= obj == req->gem.obj &&
> + offset == req->gem.offset;
>
> if (end == req_end) {
> ret = op_unmap_cb(ops, priv, va, merge);
> @@ -2141,9 +2144,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> if (end > req_end) {
> struct drm_gpuva_op_map n = {
> .va.addr = req_end,
> - .va.range = range - req_range,
> + .va.range = range - req->va.range,
> .gem.obj = obj,
> - .gem.offset = offset + req_range,
> + .gem.offset = offset + req->va.range,
> };
> struct drm_gpuva_op_unmap u = {
> .va = va,
> @@ -2155,8 +2158,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> return ret;
> break;
> }
> - } else if (addr < req_addr) {
> - u64 ls_range = req_addr - addr;
> + } else if (addr < req->va.addr) {
> + u64 ls_range = req->va.addr - addr;
> struct drm_gpuva_op_map p = {
> .va.addr = addr,
> .va.range = ls_range,
> @@ -2165,8 +2168,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> };
> struct drm_gpuva_op_unmap u = { .va = va };
>
> - merge &= obj == req_obj &&
> - offset + ls_range == req_offset;
> + merge &= obj == req->gem.obj &&
> + offset + ls_range == req->gem.offset;
> u.keep = merge;
>
> if (end == req_end) {
> @@ -2189,7 +2192,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> .va.range = end - req_end,
> .gem.obj = obj,
> .gem.offset = offset + ls_range +
> - req_range,
> + req->va.range,
> };
>
> ret = op_remap_cb(ops, priv, &p, &n, &u);
> @@ -2197,10 +2200,10 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> return ret;
> break;
> }
> - } else if (addr > req_addr) {
> - merge &= obj == req_obj &&
> - offset == req_offset +
> - (addr - req_addr);
> + } else if (addr > req->va.addr) {
> + merge &= obj == req->gem.obj &&
> + offset == req->gem.offset +
> + (addr - req->va.addr);
>
> if (end == req_end) {
> ret = op_unmap_cb(ops, priv, va, merge);
> @@ -2236,9 +2239,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> }
> }
>
> - return op_map_cb(ops, priv,
> - req_addr, req_range,
> - req_obj, req_offset);
> + return op_map_cb(ops, priv, req);
> }
>
> static int
> @@ -2303,10 +2304,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
> * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
> * @gpuvm: the &drm_gpuvm representing the GPU VA space
> * @priv: pointer to a driver private data structure
> - * @req_addr: the start address of the new mapping
> - * @req_range: the range of the new mapping
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @req: ptr to drm_gpuva_op_map struct
> *
> * This function iterates the given range of the GPU VA space. It utilizes the
> * &drm_gpuvm_ops to call back into the driver providing the split and merge
> @@ -2333,8 +2331,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
> */
> int
> drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + const struct drm_gpuva_op_map *req)
> {
> const struct drm_gpuvm_ops *ops = gpuvm->ops;
>
> @@ -2343,9 +2340,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> ops->sm_step_unmap)))
> return -EINVAL;
>
> - return __drm_gpuvm_sm_map(gpuvm, ops, priv,
> - req_addr, req_range,
> - req_obj, req_offset);
> + return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
> }
> EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
>
> @@ -2421,10 +2416,7 @@ static const struct drm_gpuvm_ops lock_ops = {
> * @gpuvm: the &drm_gpuvm representing the GPU VA space
> * @exec: the &drm_exec locking context
> * @num_fences: for newly mapped objects, the # of fences to reserve
> - * @req_addr: the start address of the range to unmap
> - * @req_range: the range of the mappings to unmap
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @op: ptr to drm_gpuva_op_map struct
s/@op/@req/ - Kernel test robot.
Also I believe Danilo's suggestion here was to define drm_gpuvm_map_req
as the argument and then embed drm_gpuva_op_map within
drm_gpuvm_map_req. So in patch [1], flags would be added to
drm_gpuvm_map_req rather than drm_gpuva_op_map.
Matt
[1] https://patchwork.freedesktop.org/patch/666211/?series=149550&rev=5
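Roughly, the suggestion amounts to something like this (a sketch of the
idea, not code from this series):

	/* Sketch only: wrap the op in a request struct so that new
	 * optional arguments (e.g. flags) extend the request rather
	 * than struct drm_gpuva_op_map itself.
	 */
	struct drm_gpuvm_map_req {
		struct drm_gpuva_op_map op;
		/* future optional arguments would live here */
	};

	int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
			     const struct drm_gpuvm_map_req *req);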
> *
> * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
> * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
> @@ -2442,12 +2434,10 @@ static const struct drm_gpuvm_ops lock_ops = {
> * for_each_vm_bind_operation {
> * switch (op->op) {
> * case DRIVER_OP_UNMAP:
> - * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
> + * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->va.addr, op->va.range);
> * break;
> * case DRIVER_OP_MAP:
> - * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
> - * op->addr, op->range,
> - * obj, op->obj_offset);
> + * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, op);
> * break;
> * }
> *
> @@ -2478,18 +2468,16 @@ static const struct drm_gpuvm_ops lock_ops = {
> int
> drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
> struct drm_exec *exec, unsigned int num_fences,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + struct drm_gpuva_op_map *req)
> {
> - if (req_obj) {
> - int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
> + if (req->gem.obj) {
> + int ret = drm_exec_prepare_obj(exec, req->gem.obj, num_fences);
> if (ret)
> return ret;
> }
>
> return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
> - req_addr, req_range,
> - req_obj, req_offset);
> + req);
>
> }
> EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
> @@ -2611,10 +2599,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
> /**
> * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
> * @gpuvm: the &drm_gpuvm representing the GPU VA space
> - * @req_addr: the start address of the new mapping
> - * @req_range: the range of the new mapping
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @req: ptr to drm_gpuva_op_map struct
> *
> * This function creates a list of operations to perform splitting and merging
> * of existent mapping(s) with the newly requested one.
> @@ -2642,8 +2627,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
> */
> struct drm_gpuva_ops *
> drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *req_obj, u64 req_offset)
> + const struct drm_gpuva_op_map *req)
> {
> struct drm_gpuva_ops *ops;
> struct {
> @@ -2661,9 +2645,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> args.vm = gpuvm;
> args.ops = ops;
>
> - ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
> - req_addr, req_range,
> - req_obj, req_offset);
> + ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
> if (ret)
> goto err_free_ops;
>
> diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
> index 2896fa7501b1..57116709de81 100644
> --- a/drivers/gpu/drm/imagination/pvr_vm.c
> +++ b/drivers/gpu/drm/imagination/pvr_vm.c
> @@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
> static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
> {
> switch (bind_op->type) {
> - case PVR_VM_BIND_TYPE_MAP:
> + case PVR_VM_BIND_TYPE_MAP: {
> + const struct drm_gpuva_op_map map_req = {
> + .va.addr = bind_op->device_addr,
> + .va.range = bind_op->size,
> + .gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
> + .gem.offset = bind_op->offset,
> + };
> +
> return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
> - bind_op, bind_op->device_addr,
> - bind_op->size,
> - gem_from_pvr_gem(bind_op->pvr_obj),
> - bind_op->offset);
> + bind_op, &map_req);
> + }
>
> case PVR_VM_BIND_TYPE_UNMAP:
> return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
> diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
> index 3cd8562a5109..59a9b41bc967 100644
> --- a/drivers/gpu/drm/msm/msm_gem_vma.c
> +++ b/drivers/gpu/drm/msm/msm_gem_vma.c
> @@ -371,6 +371,12 @@ struct drm_gpuva *
> msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
> u64 offset, u64 range_start, u64 range_end)
> {
> + struct drm_gpuva_op_map op_map = {
> + .va.addr = range_start,
> + .va.range = range_end - range_start,
> + .gem.obj = obj,
> + .gem.offset = offset,
> + };
> struct msm_gem_vm *vm = to_msm_vm(gpuvm);
> struct drm_gpuvm_bo *vm_bo;
> struct msm_gem_vma *vma;
> @@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
> if (obj)
> GEM_WARN_ON((range_end - range_start) > obj->size);
>
> - drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
> + drm_gpuva_init_from_op(&vma->base, &op_map);
> vma->mapped = false;
>
> ret = drm_gpuva_insert(&vm->base, &vma->base);
> @@ -1172,10 +1178,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
> break;
> case MSM_VM_BIND_OP_MAP:
> case MSM_VM_BIND_OP_MAP_NULL:
> - ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
> - op->iova, op->range,
> - op->obj, op->obj_offset);
> + {
> + struct drm_gpuva_op_map map_req = {
> + .va.addr = op->iova,
> + .va.range = op->range,
> + .gem.obj = op->obj,
> + .gem.offset = op->obj_offset,
> + };
> +
> + ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
> break;
> + }
> default:
> /*
> * lookup_op() should have already thrown an error for
> @@ -1283,9 +1296,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
> arg.flags |= MSM_VMA_DUMP;
> fallthrough;
> case MSM_VM_BIND_OP_MAP_NULL:
> - ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
> - op->range, op->obj, op->obj_offset);
> + {
> + struct drm_gpuva_op_map map_req = {
> + .va.addr = op->iova,
> + .va.range = op->range,
> + .gem.obj = op->obj,
> + .gem.offset = op->obj_offset,
> + };
> +
> + ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
> break;
> + }
> default:
> /*
> * lookup_op() should have already thrown an error for
> diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> index ddfc46bc1b3e..b74054b0a476 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> @@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
> break;
> case OP_MAP: {
> struct nouveau_uvma_region *reg;
> + struct drm_gpuva_op_map map_req = {
> + .va.addr = op->va.addr,
> + .va.range = op->va.range,
> + .gem.obj = op->gem.obj,
> + .gem.offset = op->gem.offset,
> + };
>
> reg = nouveau_uvma_region_find_first(uvmm,
> op->va.addr,
> @@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
> }
>
> op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
> - op->va.addr,
> - op->va.range,
> - op->gem.obj,
> - op->gem.offset);
> + &map_req);
> if (IS_ERR(op->ops)) {
> ret = PTR_ERR(op->ops);
> goto unwind_continue;
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 4140f697ba5a..5fd4245a57b9 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -2169,15 +2169,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
> mutex_lock(&vm->op_lock);
> vm->op_ctx = op;
> switch (op_type) {
> - case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
> + case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
> + const struct drm_gpuva_op_map map_req = {
> + .va.addr = op->va.addr,
> + .va.range = op->va.range,
> + .gem.obj = op->map.vm_bo->obj,
> + .gem.offset = op->map.bo_offset,
> + };
> +
> if (vm->unusable) {
> ret = -EINVAL;
> break;
> }
>
> - ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
> - op->map.vm_bo->obj, op->map.bo_offset);
> + ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
> break;
> + }
>
> case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
> ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 432ea325677d..4b3e78745363 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>
> switch (operation) {
> case DRM_XE_VM_BIND_OP_MAP:
> - case DRM_XE_VM_BIND_OP_MAP_USERPTR:
> - ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
> - obj, bo_offset_or_userptr);
> + case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
> + struct drm_gpuva_op_map map_req = {
> + .va.addr = addr,
> + .va.range = range,
> + .gem.obj = obj,
> + .gem.offset = bo_offset_or_userptr,
> + };
> +
> + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
> break;
> + }
> case DRM_XE_VM_BIND_OP_UNMAP:
> ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
> break;
> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> index 274532facfd6..892ffe75a62f 100644
> --- a/include/drm/drm_gpuvm.h
> +++ b/include/drm/drm_gpuvm.h
> @@ -1060,8 +1060,8 @@ struct drm_gpuva_ops {
>
> struct drm_gpuva_ops *
> drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> - u64 addr, u64 range,
> - struct drm_gem_object *obj, u64 offset);
> + const struct drm_gpuva_op_map *req);
> +
> struct drm_gpuva_ops *
> drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
> u64 addr, u64 range);
> @@ -1205,16 +1205,14 @@ struct drm_gpuvm_ops {
> };
>
> int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> - u64 addr, u64 range,
> - struct drm_gem_object *obj, u64 offset);
> + const struct drm_gpuva_op_map *req);
>
> int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
> u64 addr, u64 range);
>
> int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
> struct drm_exec *exec, unsigned int num_fences,
> - u64 req_addr, u64 req_range,
> - struct drm_gem_object *obj, u64 offset);
> + struct drm_gpuva_op_map *req);
>
> int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
> u64 req_addr, u64 req_range);
> --
> 2.34.1
>
* Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
2025-08-05 3:56 ` Matthew Brost
@ 2025-08-05 5:24 ` Ghimiray, Himal Prasad
2025-08-05 10:10 ` Danilo Krummrich
0 siblings, 1 reply; 10+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-08-05 5:24 UTC (permalink / raw)
To: Matthew Brost
Cc: intel-xe, Thomas Hellström, Boris Brezillon,
Danilo Krummrich, Boris Brezillon, Caterina Shablia, Rob Clark,
dri-devel, Danilo Krummrich
On 05-08-2025 09:26, Matthew Brost wrote:
> On Wed, Jul 30, 2025 at 06:30:26PM +0530, Himal Prasad Ghimiray wrote:
>> From: Boris Brezillon <boris.brezillon@collabora.com>
>>
>> We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
>> so, before we do that, let's pass arguments through a struct instead
>> of changing each call site every time a new optional argument is added.
>>
>> v5
>> - Use drm_gpuva_op_map—same as drm_gpuvm_map_req (Danilo)
>> - Rebase changes for drm_gpuvm_sm_map_exec_lock()
>> - Fix kernel-docs
>>
>> Cc: Danilo Krummrich <dakr@redhat.com>
>> Cc: Boris Brezillon <bbrezillon@kernel.org>
>> Cc: Caterina Shablia <caterina.shablia@collabora.com>
>> Cc: Rob Clark <robin.clark@oss.qualcomm.com>
>> Cc: Matthew Brost <matthew.brost@intel.com>
>> Cc: <dri-devel@lists.freedesktop.org>
>>
>> Acked-by: Danilo Krummrich <dakr@kernel.org> (#v4)
>> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
>> Signed-off-by: Caterina Shablia <caterina.shablia@collabora.com>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> ---
>> drivers/gpu/drm/drm_gpuvm.c | 106 ++++++++++---------------
>> drivers/gpu/drm/imagination/pvr_vm.c | 15 ++--
>> drivers/gpu/drm/msm/msm_gem_vma.c | 33 ++++++--
>> drivers/gpu/drm/nouveau/nouveau_uvmm.c | 11 ++-
>> drivers/gpu/drm/panthor/panthor_mmu.c | 13 ++-
>> drivers/gpu/drm/xe/xe_vm.c | 13 ++-
>> include/drm/drm_gpuvm.h | 10 +--
>> 7 files changed, 112 insertions(+), 89 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
>> index bbc7fecb6f4a..f04d80a3a63b 100644
>> --- a/drivers/gpu/drm/drm_gpuvm.c
>> +++ b/drivers/gpu/drm/drm_gpuvm.c
>> @@ -486,13 +486,18 @@
>> * u64 addr, u64 range,
>> * struct drm_gem_object *obj, u64 offset)
>> * {
>> + * struct drm_gpuva_op_map op_map = {
>> + * .va.addr = addr,
>> + * .va.range = range,
>> + * .gem.obj = obj,
>> + * .gem.offset = offset,
>> + * };
>> * struct drm_gpuva_ops *ops;
>> * struct drm_gpuva_op *op
>> * struct drm_gpuvm_bo *vm_bo;
>> *
>> * driver_lock_va_space();
>> - * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
>> - * obj, offset);
>> + * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &op_map);
>> * if (IS_ERR(ops))
>> * return PTR_ERR(ops);
>> *
>> @@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
>>
>> static int
>> op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
>> - u64 addr, u64 range,
>> - struct drm_gem_object *obj, u64 offset)
>> + const struct drm_gpuva_op_map *req)
>> {
>> struct drm_gpuva_op op = {};
>>
>> op.op = DRM_GPUVA_OP_MAP;
>> - op.map.va.addr = addr;
>> - op.map.va.range = range;
>> - op.map.gem.obj = obj;
>> - op.map.gem.offset = offset;
>> + op.map.va.addr = req->va.addr;
>> + op.map.va.range = req->va.range;
>> + op.map.gem.obj = req->gem.obj;
>> + op.map.gem.offset = req->gem.offset;
>>
>> return fn->sm_step_map(&op, priv);
>> }
>> @@ -2102,17 +2106,16 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
>> static int
>> __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> const struct drm_gpuvm_ops *ops, void *priv,
>> - u64 req_addr, u64 req_range,
>> - struct drm_gem_object *req_obj, u64 req_offset)
>> + const struct drm_gpuva_op_map *req)
>> {
>> struct drm_gpuva *va, *next;
>> - u64 req_end = req_addr + req_range;
>> + u64 req_end = req->va.addr + req->va.range;
>> int ret;
>>
>> - if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
>> + if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
>> return -EINVAL;
>>
>> - drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
>> + drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req->va.addr, req_end) {
>> struct drm_gem_object *obj = va->gem.obj;
>> u64 offset = va->gem.offset;
>> u64 addr = va->va.addr;
>> @@ -2120,9 +2123,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> u64 end = addr + range;
>> bool merge = !!va->gem.obj;
>>
>> - if (addr == req_addr) {
>> - merge &= obj == req_obj &&
>> - offset == req_offset;
>> + if (addr == req->va.addr) {
>> + merge &= obj == req->gem.obj &&
>> + offset == req->gem.offset;
>>
>> if (end == req_end) {
>> ret = op_unmap_cb(ops, priv, va, merge);
>> @@ -2141,9 +2144,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> if (end > req_end) {
>> struct drm_gpuva_op_map n = {
>> .va.addr = req_end,
>> - .va.range = range - req_range,
>> + .va.range = range - req->va.range,
>> .gem.obj = obj,
>> - .gem.offset = offset + req_range,
>> + .gem.offset = offset + req->va.range,
>> };
>> struct drm_gpuva_op_unmap u = {
>> .va = va,
>> @@ -2155,8 +2158,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> return ret;
>> break;
>> }
>> - } else if (addr < req_addr) {
>> - u64 ls_range = req_addr - addr;
>> + } else if (addr < req->va.addr) {
>> + u64 ls_range = req->va.addr - addr;
>> struct drm_gpuva_op_map p = {
>> .va.addr = addr,
>> .va.range = ls_range,
>> @@ -2165,8 +2168,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> };
>> struct drm_gpuva_op_unmap u = { .va = va };
>>
>> - merge &= obj == req_obj &&
>> - offset + ls_range == req_offset;
>> + merge &= obj == req->gem.obj &&
>> + offset + ls_range == req->gem.offset;
>> u.keep = merge;
>>
>> if (end == req_end) {
>> @@ -2189,7 +2192,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> .va.range = end - req_end,
>> .gem.obj = obj,
>> .gem.offset = offset + ls_range +
>> - req_range,
>> + req->va.range,
>> };
>>
>> ret = op_remap_cb(ops, priv, &p, &n, &u);
>> @@ -2197,10 +2200,10 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> return ret;
>> break;
>> }
>> - } else if (addr > req_addr) {
>> - merge &= obj == req_obj &&
>> - offset == req_offset +
>> - (addr - req_addr);
>> + } else if (addr > req->va.addr) {
>> + merge &= obj == req->gem.obj &&
>> + offset == req->gem.offset +
>> + (addr - req->va.addr);
>>
>> if (end == req_end) {
>> ret = op_unmap_cb(ops, priv, va, merge);
>> @@ -2236,9 +2239,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>> }
>> }
>>
>> - return op_map_cb(ops, priv,
>> - req_addr, req_range,
>> - req_obj, req_offset);
>> + return op_map_cb(ops, priv, req);
>> }
>>
>> static int
>> @@ -2303,10 +2304,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
>> * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
>> * @gpuvm: the &drm_gpuvm representing the GPU VA space
>> * @priv: pointer to a driver private data structure
>> - * @req_addr: the start address of the new mapping
>> - * @req_range: the range of the new mapping
>> - * @req_obj: the &drm_gem_object to map
>> - * @req_offset: the offset within the &drm_gem_object
>> + * @req: ptr to drm_gpuva_op_map struct
>> *
>> * This function iterates the given range of the GPU VA space. It utilizes the
>> * &drm_gpuvm_ops to call back into the driver providing the split and merge
>> @@ -2333,8 +2331,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
>> */
>> int
>> drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
>> - u64 req_addr, u64 req_range,
>> - struct drm_gem_object *req_obj, u64 req_offset)
>> + const struct drm_gpuva_op_map *req)
>> {
>> const struct drm_gpuvm_ops *ops = gpuvm->ops;
>>
>> @@ -2343,9 +2340,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
>> ops->sm_step_unmap)))
>> return -EINVAL;
>>
>> - return __drm_gpuvm_sm_map(gpuvm, ops, priv,
>> - req_addr, req_range,
>> - req_obj, req_offset);
>> + return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
>> }
>> EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
>>
>> @@ -2421,10 +2416,7 @@ static const struct drm_gpuvm_ops lock_ops = {
>> * @gpuvm: the &drm_gpuvm representing the GPU VA space
>> * @exec: the &drm_exec locking context
>> * @num_fences: for newly mapped objects, the # of fences to reserve
>> - * @req_addr: the start address of the range to unmap
>> - * @req_range: the range of the mappings to unmap
>> - * @req_obj: the &drm_gem_object to map
>> - * @req_offset: the offset within the &drm_gem_object
>> + * @op: ptr to drm_gpuva_op_map struct
>
> s/@op/@req/ - Kernel test robot.
>
> Also I believe Danilo's suggestion here was to define drm_gpuvm_map_req
> as the argument and then embed drm_gpuva_op_map within
> drm_gpuvm_map_req. So in patch [1], flags would be added to
> drm_gpuvm_map_req rather than drm_gpuva_op_map.
>
> Matt
>
> [1] https://patchwork.freedesktop.org/patch/666211/?series=149550&rev=5
Hi Matt,
Thanks for the review. Initially, I considered using a drm_gpuvm_map_req
struct instead of passing drm_gpuva_op_map directly to the gpuvm layer,
allowing it to handle split/merge decisions independently.
However, the upcoming patch [1] relies on this flag to determine
driver-side behavior, so in the end drm_gpuva_op_map and
drm_gpuvm_map_req might end up identical. Based on that, and on Danilo's
feedback on this patch [2], I thought it would be better to keep a
single op_map struct with the flag included.
Boris, could you please confirm if the flag will be useful on the driver
side [1]?
[1] https://patchwork.freedesktop.org/patch/662832/?series=151264&rev=2
[2] https://patchwork.freedesktop.org/patch/662819/?series=151264&rev=2
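For contrast, the approach taken in this series keeps a single struct
(simplified sketch; the flags field is the one used by patch 04):

	/* Simplified sketch of the single-struct approach. */
	struct drm_gpuva_op_map {
		struct {
			u64 addr;
			u64 range;
		} va;
		struct {
			struct drm_gem_object *obj;
			u64 offset;
		} gem;
		/* e.g. DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE */
		unsigned int flags;
	};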
>
>> *
>> * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
>> * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
>> @@ -2442,12 +2434,10 @@ static const struct drm_gpuvm_ops lock_ops = {
>> * for_each_vm_bind_operation {
>> * switch (op->op) {
>> * case DRIVER_OP_UNMAP:
>> - * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
>> + * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->va.addr, op->va.range);
>> * break;
>> * case DRIVER_OP_MAP:
>> - * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
>> - * op->addr, op->range,
>> - * obj, op->obj_offset);
>> + * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, op);
>> * break;
>> * }
>> *
>> @@ -2478,18 +2468,16 @@ static const struct drm_gpuvm_ops lock_ops = {
>> int
>> drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
>> struct drm_exec *exec, unsigned int num_fences,
>> - u64 req_addr, u64 req_range,
>> - struct drm_gem_object *req_obj, u64 req_offset)
>> + struct drm_gpuva_op_map *req)
>> {
>> - if (req_obj) {
>> - int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
>> + if (req->gem.obj) {
>> + int ret = drm_exec_prepare_obj(exec, req->gem.obj, num_fences);
>> if (ret)
>> return ret;
>> }
>>
>> return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
>> - req_addr, req_range,
>> - req_obj, req_offset);
>> + req);
>>
>> }
>> EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
>> @@ -2611,10 +2599,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
>> /**
>> * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
>> * @gpuvm: the &drm_gpuvm representing the GPU VA space
>> - * @req_addr: the start address of the new mapping
>> - * @req_range: the range of the new mapping
>> - * @req_obj: the &drm_gem_object to map
>> - * @req_offset: the offset within the &drm_gem_object
>> + * @req: ptr to drm_gpuva_op_map struct
>> *
>> * This function creates a list of operations to perform splitting and merging
>> * of existent mapping(s) with the newly requested one.
>> @@ -2642,8 +2627,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
>> */
>> struct drm_gpuva_ops *
>> drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
>> - u64 req_addr, u64 req_range,
>> - struct drm_gem_object *req_obj, u64 req_offset)
>> + const struct drm_gpuva_op_map *req)
>> {
>> struct drm_gpuva_ops *ops;
>> struct {
>> @@ -2661,9 +2645,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
>> args.vm = gpuvm;
>> args.ops = ops;
>>
>> - ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
>> - req_addr, req_range,
>> - req_obj, req_offset);
>> + ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
>> if (ret)
>> goto err_free_ops;
>>
>> diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
>> index 2896fa7501b1..57116709de81 100644
>> --- a/drivers/gpu/drm/imagination/pvr_vm.c
>> +++ b/drivers/gpu/drm/imagination/pvr_vm.c
>> @@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
>> static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
>> {
>> switch (bind_op->type) {
>> - case PVR_VM_BIND_TYPE_MAP:
>> + case PVR_VM_BIND_TYPE_MAP: {
>> + const struct drm_gpuva_op_map map_req = {
>> + .va.addr = bind_op->device_addr,
>> + .va.range = bind_op->size,
>> + .gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
>> + .gem.offset = bind_op->offset,
>> + };
>> +
>> return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
>> - bind_op, bind_op->device_addr,
>> - bind_op->size,
>> - gem_from_pvr_gem(bind_op->pvr_obj),
>> - bind_op->offset);
>> + bind_op, &map_req);
>> + }
>>
>> case PVR_VM_BIND_TYPE_UNMAP:
>> return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
>> diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
>> index 3cd8562a5109..59a9b41bc967 100644
>> --- a/drivers/gpu/drm/msm/msm_gem_vma.c
>> +++ b/drivers/gpu/drm/msm/msm_gem_vma.c
>> @@ -371,6 +371,12 @@ struct drm_gpuva *
>> msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
>> u64 offset, u64 range_start, u64 range_end)
>> {
>> + struct drm_gpuva_op_map op_map = {
>> + .va.addr = range_start,
>> + .va.range = range_end - range_start,
>> + .gem.obj = obj,
>> + .gem.offset = offset,
>> + };
>> struct msm_gem_vm *vm = to_msm_vm(gpuvm);
>> struct drm_gpuvm_bo *vm_bo;
>> struct msm_gem_vma *vma;
>> @@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
>> if (obj)
>> GEM_WARN_ON((range_end - range_start) > obj->size);
>>
>> - drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
>> + drm_gpuva_init_from_op(&vma->base, &op_map);
>> vma->mapped = false;
>>
>> ret = drm_gpuva_insert(&vm->base, &vma->base);
>> @@ -1172,10 +1178,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
>> break;
>> case MSM_VM_BIND_OP_MAP:
>> case MSM_VM_BIND_OP_MAP_NULL:
>> - ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
>> - op->iova, op->range,
>> - op->obj, op->obj_offset);
>> + {
>> + struct drm_gpuva_op_map map_req = {
>> + .va.addr = op->iova,
>> + .va.range = op->range,
>> + .gem.obj = op->obj,
>> + .gem.offset = op->obj_offset,
>> + };
>> +
>> + ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
>> break;
>> + }
>> default:
>> /*
>> * lookup_op() should have already thrown an error for
>> @@ -1283,9 +1296,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
>> arg.flags |= MSM_VMA_DUMP;
>> fallthrough;
>> case MSM_VM_BIND_OP_MAP_NULL:
>> - ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
>> - op->range, op->obj, op->obj_offset);
>> + {
>> + struct drm_gpuva_op_map map_req = {
>> + .va.addr = op->iova,
>> + .va.range = op->range,
>> + .gem.obj = op->obj,
>> + .gem.offset = op->obj_offset,
>> + };
>> +
>> + ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
>> break;
>> + }
>> default:
>> /*
>> * lookup_op() should have already thrown an error for
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
>> index ddfc46bc1b3e..b74054b0a476 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
>> @@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
>> break;
>> case OP_MAP: {
>> struct nouveau_uvma_region *reg;
>> + struct drm_gpuva_op_map map_req = {
>> + .va.addr = op->va.addr,
>> + .va.range = op->va.range,
>> + .gem.obj = op->gem.obj,
>> + .gem.offset = op->gem.offset,
>> + };
>>
>> reg = nouveau_uvma_region_find_first(uvmm,
>> op->va.addr,
>> @@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
>> }
>>
>> op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
>> - op->va.addr,
>> - op->va.range,
>> - op->gem.obj,
>> - op->gem.offset);
>> + &map_req);
>> if (IS_ERR(op->ops)) {
>> ret = PTR_ERR(op->ops);
>> goto unwind_continue;
>> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
>> index 4140f697ba5a..5fd4245a57b9 100644
>> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
>> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
>> @@ -2169,15 +2169,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
>> mutex_lock(&vm->op_lock);
>> vm->op_ctx = op;
>> switch (op_type) {
>> - case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
>> + case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
>> + const struct drm_gpuva_op_map map_req = {
>> + .va.addr = op->va.addr,
>> + .va.range = op->va.range,
>> + .gem.obj = op->map.vm_bo->obj,
>> + .gem.offset = op->map.bo_offset,
>> + };
>> +
>> if (vm->unusable) {
>> ret = -EINVAL;
>> break;
>> }
>>
>> - ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
>> - op->map.vm_bo->obj, op->map.bo_offset);
>> + ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
>> break;
>> + }
>>
>> case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
>> ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index 432ea325677d..4b3e78745363 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>>
>> switch (operation) {
>> case DRM_XE_VM_BIND_OP_MAP:
>> - case DRM_XE_VM_BIND_OP_MAP_USERPTR:
>> - ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
>> - obj, bo_offset_or_userptr);
>> + case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
>> + struct drm_gpuva_op_map map_req = {
>> + .va.addr = addr,
>> + .va.range = range,
>> + .gem.obj = obj,
>> + .gem.offset = bo_offset_or_userptr,
>> + };
>> +
>> + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
>> break;
>> + }
>> case DRM_XE_VM_BIND_OP_UNMAP:
>> ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
>> break;
>> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
>> index 274532facfd6..892ffe75a62f 100644
>> --- a/include/drm/drm_gpuvm.h
>> +++ b/include/drm/drm_gpuvm.h
>> @@ -1060,8 +1060,8 @@ struct drm_gpuva_ops {
>>
>> struct drm_gpuva_ops *
>> drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
>> - u64 addr, u64 range,
>> - struct drm_gem_object *obj, u64 offset);
>> + const struct drm_gpuva_op_map *req);
>> +
>> struct drm_gpuva_ops *
>> drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
>> u64 addr, u64 range);
>> @@ -1205,16 +1205,14 @@ struct drm_gpuvm_ops {
>> };
>>
>> int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
>> - u64 addr, u64 range,
>> - struct drm_gem_object *obj, u64 offset);
>> + const struct drm_gpuva_op_map *req);
>>
>> int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
>> u64 addr, u64 range);
>>
>> int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
>> struct drm_exec *exec, unsigned int num_fences,
>> - u64 req_addr, u64 req_range,
>> - struct drm_gem_object *obj, u64 offset);
>> + struct drm_gpuva_op_map *req);
>>
>> int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
>> u64 req_addr, u64 req_range);
>> --
>> 2.34.1
>>
* Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
2025-07-30 13:00 ` [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct Himal Prasad Ghimiray
2025-07-30 23:23 ` kernel test robot
2025-08-05 3:56 ` Matthew Brost
@ 2025-08-05 9:40 ` Danilo Krummrich
2025-08-05 11:02 ` Ghimiray, Himal Prasad
2 siblings, 1 reply; 10+ messages in thread
From: Danilo Krummrich @ 2025-08-05 9:40 UTC (permalink / raw)
To: Himal Prasad Ghimiray
Cc: intel-xe, Matthew Brost, Thomas Hellström, Boris Brezillon,
Boris Brezillon, Caterina Shablia, Rob Clark, dri-devel
This series is a bit of a mess on my end. Can you please Cc me on all GPUVM
patches at my kernel.org address? Currently, some patches go to my Red Hat
address, some to kernel.org, and some to both.
* Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
2025-08-05 5:24 ` Ghimiray, Himal Prasad
@ 2025-08-05 10:10 ` Danilo Krummrich
2025-08-05 11:04 ` Ghimiray, Himal Prasad
0 siblings, 1 reply; 10+ messages in thread
From: Danilo Krummrich @ 2025-08-05 10:10 UTC (permalink / raw)
To: Ghimiray, Himal Prasad
Cc: Matthew Brost, intel-xe, Thomas Hellström, Boris Brezillon,
Danilo Krummrich, Boris Brezillon, Caterina Shablia, Rob Clark,
dri-devel
On Tue Aug 5, 2025 at 7:24 AM CEST, Himal Prasad Ghimiray wrote:
> On 05-08-2025 09:26, Matthew Brost wrote:
>> Also I believe Danilo's suggestion here was to define drm_gpuvm_map_req
>> as the argument and then embed drm_gpuva_op_map within
>> drm_gpuvm_map_req. So in patch [1], flags would be added to
>> drm_gpuvm_map_req rather than drm_gpuva_op_map.
>>
>> Matt
>>
>> [1] https://patchwork.freedesktop.org/patch/666211/?series=149550&rev=5
>
> Hi Matt,
>
> Thanks for the review. Initially, I considered using a drm_gpuvm_map_req
> struct instead of passing drm_gpuva_op_map directly to the gpuvm layer,
> allowing it to handle split/merge decisions independently.
Generally, we should only have the flags field on struct drm_gpuva_op_map if we
need to let GPUVM pass flags for (re)map operations to drivers.
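Purely as a sketch of what that wrapper could look like (my field names,
not a final API):

	/* Hypothetical request wrapper: the plain map description stays
	 * in struct drm_gpuva_op_map, request-only modifiers live in the
	 * wrapper, so the op struct drivers see carries no request flags.
	 */
	struct drm_gpuvm_map_req {
		struct drm_gpuva_op_map map;	/* va.addr, va.range, gem.obj, gem.offset */
		unsigned int flags;		/* e.g. a future SPLIT_MADVISE bit */
	};

	int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
			     const struct drm_gpuvm_map_req *req);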
> However, the upcoming patch [1] relies on this flag to determine
> driver-side behavior. So in the end, drm_gpuva_op_map and
> drm_gpuvm_map_req might end up identical. Based on that, and on Danilo's
> feedback on this patch [2], I thought it would be better to keep a single
> op_map struct with the flag included.
Let's leave this to the upcoming patches; we can always adjust. For now, let's
go with what Matt summarized above, please.
> Boris, could you please confirm if the flag will be useful on the driver
> side [1]?
>
> [1] https://patchwork.freedesktop.org/patch/662832/?series=151264&rev=2
> [2] https://patchwork.freedesktop.org/patch/662819/?series=151264&rev=2
* Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
2025-08-05 9:40 ` Danilo Krummrich
@ 2025-08-05 11:02 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 10+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-08-05 11:02 UTC (permalink / raw)
To: Danilo Krummrich
Cc: intel-xe, Matthew Brost, Thomas Hellström, Boris Brezillon,
Boris Brezillon, Caterina Shablia, Rob Clark, dri-devel
On 05-08-2025 15:10, Danilo Krummrich wrote:
> This series is a bit of a mess on my end. Can you please Cc me on all GPUVM
> patches at my kernel.org address? Currently, some patches go to my Red Hat
> address, some to kernel.org, and some to both.
Sure.
* Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
2025-08-05 10:10 ` Danilo Krummrich
@ 2025-08-05 11:04 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 10+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-08-05 11:04 UTC (permalink / raw)
To: Danilo Krummrich
Cc: Matthew Brost, intel-xe, Thomas Hellström, Boris Brezillon,
Danilo Krummrich, Boris Brezillon, Caterina Shablia, Rob Clark,
dri-devel
On 05-08-2025 15:40, Danilo Krummrich wrote:
> On Tue Aug 5, 2025 at 7:24 AM CEST, Himal Prasad Ghimiray wrote:
>> On 05-08-2025 09:26, Matthew Brost wrote:
>>> Also I believe Danilo's suggestion here was to define drm_gpuvm_map_req
>>> as the argument and then embed drm_gpuva_op_map within
>>> drm_gpuvm_map_req. So in patch [1], flags would be added to
>>> drm_gpuvm_map_req rather than drm_gpuva_op_map.
>>>
>>> Matt
>>>
>>> [1] https://patchwork.freedesktop.org/patch/666211/?series=149550&rev=5
>>
>> Hi Matt,
>>
>> Thanks for the review. Initially, I considered using a drm_gpuvm_map_req
>> struct instead of passing drm_gpuva_op_map directly to the gpuvm layer,
>> allowing it to handle split/merge decisions independently.
>
> Generally, we should only have the flags field on struct drm_gpuva_op_map if we
> need to let GPUVM pass flags for (re)map operations to drivers.
>
>> However, the upcoming patch [1] relies on this flag to determine
>> driver-side behavior. So in the end, drm_gpuva_op_map and
>> drm_gpuvm_map_req might end up identical. Based on that, and on Danilo's
>> feedback on this patch [2], I thought it would be better to keep a single
>> op_map struct with the flag included.
>
> Let's leave this to the upcoming patches; we can always adjust. For now, let's
> go with what Matt summarized above, please.
Sure, thanks. Will update the next version to use drm_gpuvm_map_req.
>
>> Boris, could you please confirm if the flag will be useful on the driver
>> side [1]?
>>
>> [1] https://patchwork.freedesktop.org/patch/662832/?series=151264&rev=2
>> [2] https://patchwork.freedesktop.org/patch/662819/?series=151264&rev=2
* Re: [PATCH v5 04/25] drm/gpuvm: Introduce DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE flag
2025-07-30 13:00 ` [PATCH v5 04/25] drm/gpuvm: Introduce DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE flag Himal Prasad Ghimiray
@ 2025-08-05 19:24 ` Matthew Brost
0 siblings, 0 replies; 10+ messages in thread
From: Matthew Brost @ 2025-08-05 19:24 UTC (permalink / raw)
To: Himal Prasad Ghimiray
Cc: intel-xe, Thomas Hellström, Danilo Krummrich,
Boris Brezillon, dri-devel
On Wed, Jul 30, 2025 at 06:30:29PM +0530, Himal Prasad Ghimiray wrote:
> - DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE: This flag is used by
> drm_gpuvm_sm_map_ops_create to iterate over GPUVMAs in the
> user-provided range and split an existing non-GEM object VMA if the
> start or end of the input range lies within it. The operations can
> create up to 2 REMAPs and 2 MAPs. This operation is intended to let
> the Xe driver assign attributes to GPUVMAs within the user-defined
> range. Unlike the default DRM_GPUVM_SM_MAP_OPS_FLAG_NONE mode,
> operations created with this flag never include UNMAPs or merges,
> and may produce no final map operation at all.
>
> v2
> - use drm_gpuvm_sm_map_ops_create with flags instead of defining new
> ops_create (Danilo)
> - Add doc (Danilo)
>
> v3
> - Fix doc
> - Fix unmapping check
>
> v4
> - Fix mapping for non madvise ops
>
> v5
> - Fix mapping (Matthew Brost)
> - Rebase on top of struct changes
>
> Cc: Danilo Krummrich <dakr@redhat.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
This patch looks good to me, but it will need a rebase based on the
discussion on patch #1 of this series.
Going to hold off on the RB until the next rev.
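For anyone following along, my reading of the split semantics, with
made-up addresses (and assuming the flags field this series adds to
drm_gpuva_op_map):

	/* An existing non-GEM VMA covers [0x0000, 0x10000); the madvise
	 * range is [0x4000, 0xc000), i.e. both boundaries fall inside
	 * that VMA. With SPLIT_MADVISE set, ops_create should emit a
	 * single REMAP (prev = [0x0000, 0x4000), next = [0xc000, 0x10000))
	 * followed by a single MAP of [0x4000, 0xc000), and never an UNMAP.
	 */
	struct drm_gpuva_op_map map_req = {
		.va.addr  = 0x4000,
		.va.range = 0x8000,
		.flags    = DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE,
	};

	struct drm_gpuva_ops *ops =
		drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);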
Matt
> Cc: Boris Brezillon <bbrezillon@kernel.org>
> Cc: <dri-devel@lists.freedesktop.org>
> Signed-off-by: Himal Prasad Ghimiray<himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/drm_gpuvm.c | 87 +++++++++++++++++++++++++++++++------
> include/drm/drm_gpuvm.h | 11 ++++-
> 2 files changed, 83 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> index f04d80a3a63b..2aeae8c2296f 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -2110,6 +2110,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> {
> struct drm_gpuva *va, *next;
> u64 req_end = req->va.addr + req->va.range;
> + bool is_madvise_ops = (req->flags & DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE);
> + bool needs_map = !is_madvise_ops;
> int ret;
>
> if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
> @@ -2122,26 +2124,35 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> u64 range = va->va.range;
> u64 end = addr + range;
> bool merge = !!va->gem.obj;
> + bool skip_madvise_ops = is_madvise_ops && merge;
>
> + needs_map = !is_madvise_ops;
> if (addr == req->va.addr) {
> merge &= obj == req->gem.obj &&
> offset == req->gem.offset;
>
> if (end == req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> break;
> }
>
> if (end < req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> continue;
> }
>
> if (end > req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> struct drm_gpuva_op_map n = {
> .va.addr = req_end,
> .va.range = range - req->va.range,
> @@ -2156,6 +2167,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> ret = op_remap_cb(ops, priv, NULL, &n, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops)
> + needs_map = true;
> break;
> }
> } else if (addr < req->va.addr) {
> @@ -2173,20 +2187,45 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> u.keep = merge;
>
> if (end == req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> ret = op_remap_cb(ops, priv, &p, NULL, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops)
> + needs_map = true;
> +
> break;
> }
>
> if (end < req_end) {
> + if (skip_madvise_ops)
> + continue;
> +
> ret = op_remap_cb(ops, priv, &p, NULL, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops) {
> + struct drm_gpuva_op_map map_req = {
> + .va.addr = req->va.addr,
> + .va.range = end - req->va.addr,
> + };
> +
> + ret = op_map_cb(ops, priv, &map_req);
> + if (ret)
> + return ret;
> + }
> +
> continue;
> }
>
> if (end > req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> struct drm_gpuva_op_map n = {
> .va.addr = req_end,
> .va.range = end - req_end,
> @@ -2198,6 +2237,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> ret = op_remap_cb(ops, priv, &p, &n, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops)
> + needs_map = true;
> break;
> }
> } else if (addr > req->va.addr) {
> @@ -2206,20 +2248,29 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> (addr - req->va.addr);
>
> if (end == req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> +
> break;
> }
>
> if (end < req_end) {
> - ret = op_unmap_cb(ops, priv, va, merge);
> - if (ret)
> - return ret;
> + if (!is_madvise_ops) {
> + ret = op_unmap_cb(ops, priv, va, merge);
> + if (ret)
> + return ret;
> + }
> +
> continue;
> }
>
> if (end > req_end) {
> + if (skip_madvise_ops)
> + break;
> +
> struct drm_gpuva_op_map n = {
> .va.addr = req_end,
> .va.range = end - req_end,
> @@ -2234,12 +2285,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> ret = op_remap_cb(ops, priv, NULL, &n, &u);
> if (ret)
> return ret;
> +
> + if (is_madvise_ops) {
> + struct drm_gpuva_op_map map_req = {
> + .va.addr = addr,
> + .va.range = req_end - addr,
> + };
> +
> + return op_map_cb(ops, priv, &map_req);
> + }
> break;
> }
> }
> }
> -
> - return op_map_cb(ops, priv, req);
> + return needs_map ? op_map_cb(ops, priv, req) : 0;
> }
>
> static int
> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> index 75c616fdc119..a8e9f70501ef 100644
> --- a/include/drm/drm_gpuvm.h
> +++ b/include/drm/drm_gpuvm.h
> @@ -811,10 +811,19 @@ enum drm_gpuva_op_type {
> };
>
> /** DOC: flags for struct drm_gpuva_op_map
> - * %DRM_GPUVM_SM_MAP_OPS_FLAG_NONE DEFAULT split and merge,
> + * %DRM_GPUVM_SM_MAP_OPS_FLAG_NONE: DEFAULT split and merge,
> * It cannot be combined with other flags.
> + *
> + * %DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE: This flag is used by
> + * drm_gpuvm_sm_map_ops_create to iterate over GPUVMA's in the user-provided
> + * range and split the existing non-GEM object VMA if the start or end of
> + * the input range lies within it. The operations can create up to 2 REMAPS
> + * and 2 MAPs. Unlike DRM_GPUVM_SM_MAP_OPS_FLAG_NONE flag, the operation with
> + * this flag will never have UNMAPs and merges, and can be without any final
> + * operations.
> */
> #define DRM_GPUVM_SM_MAP_OPS_FLAG_NONE 0
> +#define DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE BIT(0)
>
> /**
> * struct drm_gpuva_op_map - GPU VA map operation
> --
> 2.34.1
>