Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: intel-xe@lists.freedesktop.org,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	"Boris Brezillon" <boris.brezillon@collabora.com>,
	"Danilo Krummrich" <dakr@redhat.com>,
	"Boris Brezillon" <bbrezillon@kernel.org>,
	"Caterina Shablia" <caterina.shablia@collabora.com>,
	"Rob Clark" <robin.clark@oss.qualcomm.com>,
	dri-devel@lists.freedesktop.org,
	"Danilo Krummrich" <dakr@kernel.org>
Subject: Re: [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct
Date: Mon, 4 Aug 2025 20:56:16 -0700
Message-ID: <aJGA4BPLJRs0RLfz@lstrano-desk.jf.intel.com>
In-Reply-To: <20250730130050.1001648-2-himal.prasad.ghimiray@intel.com>

On Wed, Jul 30, 2025 at 06:30:26PM +0530, Himal Prasad Ghimiray wrote:
> From: Boris Brezillon <boris.brezillon@collabora.com>
> 
> We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
> so, before we do that, let's pass arguments through a struct instead
> of changing each call site every time a new optional argument is added.
> 
> v5:
>  - Use drm_gpuva_op_map in place of drm_gpuvm_map_req (Danilo)
>  - Rebase changes for drm_gpuvm_sm_map_exec_lock()
>  - Fix kernel-docs
> 
> Cc: Danilo Krummrich <dakr@redhat.com>
> Cc: Boris Brezillon <bbrezillon@kernel.org>
> Cc: Caterina Shablia <caterina.shablia@collabora.com>
> Cc: Rob Clark <robin.clark@oss.qualcomm.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Cc: <dri-devel@lists.freedesktop.org>
> 
> Acked-by: Danilo Krummrich <dakr@kernel.org> (#v4)
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
> Signed-off-by: Caterina Shablia <caterina.shablia@collabora.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
>  drivers/gpu/drm/drm_gpuvm.c            | 106 ++++++++++---------------
>  drivers/gpu/drm/imagination/pvr_vm.c   |  15 ++--
>  drivers/gpu/drm/msm/msm_gem_vma.c      |  33 ++++++--
>  drivers/gpu/drm/nouveau/nouveau_uvmm.c |  11 ++-
>  drivers/gpu/drm/panthor/panthor_mmu.c  |  13 ++-
>  drivers/gpu/drm/xe/xe_vm.c             |  13 ++-
>  include/drm/drm_gpuvm.h                |  10 +--
>  7 files changed, 112 insertions(+), 89 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> index bbc7fecb6f4a..f04d80a3a63b 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -486,13 +486,18 @@
>   *				  u64 addr, u64 range,
>   *				  struct drm_gem_object *obj, u64 offset)
>   *	{
> + *		struct drm_gpuva_op_map op_map = {
> + *			.va.addr = addr,
> + *			.va.range = range,
> + *			.gem.obj = obj,
> + *			.gem.offset = offset,
> + *		};
>   *		struct drm_gpuva_ops *ops;
>  *		struct drm_gpuva_op *op;
>   *		struct drm_gpuvm_bo *vm_bo;
>   *
>   *		driver_lock_va_space();
> - *		ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
> - *						  obj, offset);
> + *		ops = drm_gpuvm_sm_map_ops_create(gpuvm, &op_map);
>   *		if (IS_ERR(ops))
>   *			return PTR_ERR(ops);
>   *
> @@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
>  
>  static int
>  op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
> -	  u64 addr, u64 range,
> -	  struct drm_gem_object *obj, u64 offset)
> +	  const struct drm_gpuva_op_map *req)
>  {
>  	struct drm_gpuva_op op = {};
>  
>  	op.op = DRM_GPUVA_OP_MAP;
> -	op.map.va.addr = addr;
> -	op.map.va.range = range;
> -	op.map.gem.obj = obj;
> -	op.map.gem.offset = offset;
> +	op.map.va.addr = req->va.addr;
> +	op.map.va.range = req->va.range;
> +	op.map.gem.obj = req->gem.obj;
> +	op.map.gem.offset = req->gem.offset;
>  
>  	return fn->sm_step_map(&op, priv);
>  }
> @@ -2102,17 +2106,16 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
>  static int
>  __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  		   const struct drm_gpuvm_ops *ops, void *priv,
> -		   u64 req_addr, u64 req_range,
> -		   struct drm_gem_object *req_obj, u64 req_offset)
> +		   const struct drm_gpuva_op_map *req)
>  {
>  	struct drm_gpuva *va, *next;
> -	u64 req_end = req_addr + req_range;
> +	u64 req_end = req->va.addr + req->va.range;
>  	int ret;
>  
> -	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
> +	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
>  		return -EINVAL;
>  
> -	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
> +	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req->va.addr, req_end) {
>  		struct drm_gem_object *obj = va->gem.obj;
>  		u64 offset = va->gem.offset;
>  		u64 addr = va->va.addr;
> @@ -2120,9 +2123,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  		u64 end = addr + range;
>  		bool merge = !!va->gem.obj;
>  
> -		if (addr == req_addr) {
> -			merge &= obj == req_obj &&
> -				 offset == req_offset;
> +		if (addr == req->va.addr) {
> +			merge &= obj == req->gem.obj &&
> +				 offset == req->gem.offset;
>  
>  			if (end == req_end) {
>  				ret = op_unmap_cb(ops, priv, va, merge);
> @@ -2141,9 +2144,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  			if (end > req_end) {
>  				struct drm_gpuva_op_map n = {
>  					.va.addr = req_end,
> -					.va.range = range - req_range,
> +					.va.range = range - req->va.range,
>  					.gem.obj = obj,
> -					.gem.offset = offset + req_range,
> +					.gem.offset = offset + req->va.range,
>  				};
>  				struct drm_gpuva_op_unmap u = {
>  					.va = va,
> @@ -2155,8 +2158,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  					return ret;
>  				break;
>  			}
> -		} else if (addr < req_addr) {
> -			u64 ls_range = req_addr - addr;
> +		} else if (addr < req->va.addr) {
> +			u64 ls_range = req->va.addr - addr;
>  			struct drm_gpuva_op_map p = {
>  				.va.addr = addr,
>  				.va.range = ls_range,
> @@ -2165,8 +2168,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  			};
>  			struct drm_gpuva_op_unmap u = { .va = va };
>  
> -			merge &= obj == req_obj &&
> -				 offset + ls_range == req_offset;
> +			merge &= obj == req->gem.obj &&
> +				 offset + ls_range == req->gem.offset;
>  			u.keep = merge;
>  
>  			if (end == req_end) {
> @@ -2189,7 +2192,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  					.va.range = end - req_end,
>  					.gem.obj = obj,
>  					.gem.offset = offset + ls_range +
> -						      req_range,
> +						      req->va.range,
>  				};
>  
>  				ret = op_remap_cb(ops, priv, &p, &n, &u);
> @@ -2197,10 +2200,10 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  					return ret;
>  				break;
>  			}
> -		} else if (addr > req_addr) {
> -			merge &= obj == req_obj &&
> -				 offset == req_offset +
> -					   (addr - req_addr);
> +		} else if (addr > req->va.addr) {
> +			merge &= obj == req->gem.obj &&
> +				 offset == req->gem.offset +
> +					   (addr - req->va.addr);
>  
>  			if (end == req_end) {
>  				ret = op_unmap_cb(ops, priv, va, merge);
> @@ -2236,9 +2239,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>  		}
>  	}
>  
> -	return op_map_cb(ops, priv,
> -			 req_addr, req_range,
> -			 req_obj, req_offset);
> +	return op_map_cb(ops, priv, req);
>  }
>  
>  static int
> @@ -2303,10 +2304,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
>   * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
>   * @gpuvm: the &drm_gpuvm representing the GPU VA space
>   * @priv: pointer to a driver private data structure
> - * @req_addr: the start address of the new mapping
> - * @req_range: the range of the new mapping
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @req: ptr to drm_gpuva_op_map struct
>   *
>   * This function iterates the given range of the GPU VA space. It utilizes the
>   * &drm_gpuvm_ops to call back into the driver providing the split and merge
> @@ -2333,8 +2331,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
>   */
>  int
>  drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> -		 u64 req_addr, u64 req_range,
> -		 struct drm_gem_object *req_obj, u64 req_offset)
> +		 const struct drm_gpuva_op_map *req)
>  {
>  	const struct drm_gpuvm_ops *ops = gpuvm->ops;
>  
> @@ -2343,9 +2340,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
>  		       ops->sm_step_unmap)))
>  		return -EINVAL;
>  
> -	return __drm_gpuvm_sm_map(gpuvm, ops, priv,
> -				  req_addr, req_range,
> -				  req_obj, req_offset);
> +	return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
>  }
>  EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
>  
> @@ -2421,10 +2416,7 @@ static const struct drm_gpuvm_ops lock_ops = {
>   * @gpuvm: the &drm_gpuvm representing the GPU VA space
>   * @exec: the &drm_exec locking context
>   * @num_fences: for newly mapped objects, the # of fences to reserve
> - * @req_addr: the start address of the range to unmap
> - * @req_range: the range of the mappings to unmap
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @op: ptr to drm_gpuva_op_map struct

s/@op/@req/ - as reported by the kernel test robot.

Also I believe Danilo's suggestion here was to define drm_gpuvm_map_req
as the argument and then embed drm_gpuva_op_map within
drm_gpuvm_map_req. So in patch [1], flags would be added to
drm_gpuvm_map_req rather than drm_gpuva_op_map.
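
Roughly something like this, if I'm reading that right (untested
sketch; only the drm_gpuvm_map_req name and the embedding come from
the discussion and [1], everything else is assumed):

/*
 * drm_gpuva_op_map keeps describing the mapping itself; request-only
 * modifiers (e.g. the flags added in [1]) live in the wrapper, so
 * drm_gpuva_op_map doesn't grow fields that only matter at request
 * time.
 */
struct drm_gpuvm_map_req {
	/** @op: the map operation (VA addr/range, GEM obj/offset) */
	struct drm_gpuva_op_map op;
	/* request-only fields, e.g. flags from [1], would go here */
};

int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     const struct drm_gpuvm_map_req *req);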

Matt

[1] https://patchwork.freedesktop.org/patch/666211/?series=149550&rev=5
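
Call sites would then initialize through the embedded op; e.g. the xe
hunk below would read something like (again just a sketch, not
compiled):

	struct drm_gpuvm_map_req map_req = {
		.op.va.addr = addr,
		.op.va.range = range,
		.op.gem.obj = obj,
		.op.gem.offset = bo_offset_or_userptr,
	};

	ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);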

>   *
>   * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
>   * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
> @@ -2442,12 +2434,10 @@ static const struct drm_gpuvm_ops lock_ops = {
>   *            for_each_vm_bind_operation {
>   *                switch (op->op) {
>   *                case DRIVER_OP_UNMAP:
> - *                    ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
> + *                    ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->va.addr, op->va.range);
>   *                    break;
>   *                case DRIVER_OP_MAP:
> - *                    ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
> - *                                                     op->addr, op->range,
> - *                                                     obj, op->obj_offset);
> + *                    ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, op);
>   *                    break;
>   *                }
>   *
> @@ -2478,18 +2468,16 @@ static const struct drm_gpuvm_ops lock_ops = {
>  int
>  drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
>  			   struct drm_exec *exec, unsigned int num_fences,
> -			   u64 req_addr, u64 req_range,
> -			   struct drm_gem_object *req_obj, u64 req_offset)
> +			   struct drm_gpuva_op_map *req)
>  {
> -	if (req_obj) {
> -		int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
> +	if (req->gem.obj) {
> +		int ret = drm_exec_prepare_obj(exec, req->gem.obj, num_fences);
>  		if (ret)
>  			return ret;
>  	}
>  
>  	return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
> -				  req_addr, req_range,
> -				  req_obj, req_offset);
> +				  req);
>  
>  }
>  EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
> @@ -2611,10 +2599,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
>  /**
>   * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
>   * @gpuvm: the &drm_gpuvm representing the GPU VA space
> - * @req_addr: the start address of the new mapping
> - * @req_range: the range of the new mapping
> - * @req_obj: the &drm_gem_object to map
> - * @req_offset: the offset within the &drm_gem_object
> + * @req: ptr to drm_gpuva_op_map struct
>   *
>   * This function creates a list of operations to perform splitting and merging
>   * of existent mapping(s) with the newly requested one.
> @@ -2642,8 +2627,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
>   */
>  struct drm_gpuva_ops *
>  drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> -			    u64 req_addr, u64 req_range,
> -			    struct drm_gem_object *req_obj, u64 req_offset)
> +			    const struct drm_gpuva_op_map *req)
>  {
>  	struct drm_gpuva_ops *ops;
>  	struct {
> @@ -2661,9 +2645,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
>  	args.vm = gpuvm;
>  	args.ops = ops;
>  
> -	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
> -				 req_addr, req_range,
> -				 req_obj, req_offset);
> +	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
>  	if (ret)
>  		goto err_free_ops;
>  
> diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
> index 2896fa7501b1..57116709de81 100644
> --- a/drivers/gpu/drm/imagination/pvr_vm.c
> +++ b/drivers/gpu/drm/imagination/pvr_vm.c
> @@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
>  static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
>  {
>  	switch (bind_op->type) {
> -	case PVR_VM_BIND_TYPE_MAP:
> +	case PVR_VM_BIND_TYPE_MAP: {
> +		const struct drm_gpuva_op_map map_req = {
> +			.va.addr = bind_op->device_addr,
> +			.va.range = bind_op->size,
> +			.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
> +			.gem.offset = bind_op->offset,
> +		};
> +
>  		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
> -					bind_op, bind_op->device_addr,
> -					bind_op->size,
> -					gem_from_pvr_gem(bind_op->pvr_obj),
> -					bind_op->offset);
> +					bind_op, &map_req);
> +	}
>  
>  	case PVR_VM_BIND_TYPE_UNMAP:
>  		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
> diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
> index 3cd8562a5109..59a9b41bc967 100644
> --- a/drivers/gpu/drm/msm/msm_gem_vma.c
> +++ b/drivers/gpu/drm/msm/msm_gem_vma.c
> @@ -371,6 +371,12 @@ struct drm_gpuva *
>  msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
>  		u64 offset, u64 range_start, u64 range_end)
>  {
> +	struct drm_gpuva_op_map op_map = {
> +		.va.addr = range_start,
> +		.va.range = range_end - range_start,
> +		.gem.obj = obj,
> +		.gem.offset = offset,
> +	};
>  	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
>  	struct drm_gpuvm_bo *vm_bo;
>  	struct msm_gem_vma *vma;
> @@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
>  	if (obj)
>  		GEM_WARN_ON((range_end - range_start) > obj->size);
>  
> -	drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
> +	drm_gpuva_init_from_op(&vma->base, &op_map);
>  	vma->mapped = false;
>  
>  	ret = drm_gpuva_insert(&vm->base, &vma->base);
> @@ -1172,10 +1178,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
>  				break;
>  			case MSM_VM_BIND_OP_MAP:
>  			case MSM_VM_BIND_OP_MAP_NULL:
> -				ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
> -							    op->iova, op->range,
> -							    op->obj, op->obj_offset);
> +			{
> +				struct drm_gpuva_op_map map_req = {
> +					.va.addr = op->iova,
> +					.va.range = op->range,
> +					.gem.obj = op->obj,
> +					.gem.offset = op->obj_offset,
> +				};
> +
> +				ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
>  				break;
> +			}
>  			default:
>  				/*
>  				 * lookup_op() should have already thrown an error for
> @@ -1283,9 +1296,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
>  				arg.flags |= MSM_VMA_DUMP;
>  			fallthrough;
>  		case MSM_VM_BIND_OP_MAP_NULL:
> -			ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
> -					       op->range, op->obj, op->obj_offset);
> +		{
> +			struct drm_gpuva_op_map map_req = {
> +				.va.addr = op->iova,
> +				.va.range = op->range,
> +				.gem.obj = op->obj,
> +				.gem.offset = op->obj_offset,
> +			};
> +
> +			ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
>  			break;
> +		}
>  		default:
>  			/*
>  			 * lookup_op() should have already thrown an error for
> diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> index ddfc46bc1b3e..b74054b0a476 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> @@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
>  			break;
>  		case OP_MAP: {
>  			struct nouveau_uvma_region *reg;
> +			struct drm_gpuva_op_map map_req = {
> +				.va.addr = op->va.addr,
> +				.va.range = op->va.range,
> +				.gem.obj = op->gem.obj,
> +				.gem.offset = op->gem.offset,
> +			};
>  
>  			reg = nouveau_uvma_region_find_first(uvmm,
>  							     op->va.addr,
> @@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
>  			}
>  
>  			op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
> -							      op->va.addr,
> -							      op->va.range,
> -							      op->gem.obj,
> -							      op->gem.offset);
> +							      &map_req);
>  			if (IS_ERR(op->ops)) {
>  				ret = PTR_ERR(op->ops);
>  				goto unwind_continue;
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 4140f697ba5a..5fd4245a57b9 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -2169,15 +2169,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
>  	mutex_lock(&vm->op_lock);
>  	vm->op_ctx = op;
>  	switch (op_type) {
> -	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
> +	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
> +		const struct drm_gpuva_op_map map_req = {
> +			.va.addr = op->va.addr,
> +			.va.range = op->va.range,
> +			.gem.obj = op->map.vm_bo->obj,
> +			.gem.offset = op->map.bo_offset,
> +		};
> +
>  		if (vm->unusable) {
>  			ret = -EINVAL;
>  			break;
>  		}
>  
> -		ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
> -				       op->map.vm_bo->obj, op->map.bo_offset);
> +		ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
>  		break;
> +	}
>  
>  	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
>  		ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 432ea325677d..4b3e78745363 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>  
>  	switch (operation) {
>  	case DRM_XE_VM_BIND_OP_MAP:
> -	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
> -		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
> -						  obj, bo_offset_or_userptr);
> +	case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
> +		struct drm_gpuva_op_map map_req = {
> +			.va.addr = addr,
> +			.va.range = range,
> +			.gem.obj = obj,
> +			.gem.offset = bo_offset_or_userptr,
> +		};
> +
> +		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
>  		break;
> +	}
>  	case DRM_XE_VM_BIND_OP_UNMAP:
>  		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
>  		break;
> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> index 274532facfd6..892ffe75a62f 100644
> --- a/include/drm/drm_gpuvm.h
> +++ b/include/drm/drm_gpuvm.h
> @@ -1060,8 +1060,8 @@ struct drm_gpuva_ops {
>  
>  struct drm_gpuva_ops *
>  drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
> -			    u64 addr, u64 range,
> -			    struct drm_gem_object *obj, u64 offset);
> +			    const struct drm_gpuva_op_map *req);
> +
>  struct drm_gpuva_ops *
>  drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
>  			      u64 addr, u64 range);
> @@ -1205,16 +1205,14 @@ struct drm_gpuvm_ops {
>  };
>  
>  int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
> -		     u64 addr, u64 range,
> -		     struct drm_gem_object *obj, u64 offset);
> +		     const struct drm_gpuva_op_map *req);
>  
>  int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
>  		       u64 addr, u64 range);
>  
>  int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
>  			  struct drm_exec *exec, unsigned int num_fences,
> -			  u64 req_addr, u64 req_range,
> -			  struct drm_gem_object *obj, u64 offset);
> +			  struct drm_gpuva_op_map *req);
>  
>  int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
>  				 u64 req_addr, u64 req_range);
> -- 
> 2.34.1
> 

Thread overview: 54+ messages
2025-07-30 13:00 [PATCH v5 00/25] MADVISE FOR XE Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 01/25] drm/gpuvm: Pass map arguments through a struct Himal Prasad Ghimiray
2025-07-30 23:23   ` kernel test robot
2025-08-05  3:56   ` Matthew Brost [this message]
2025-08-05  5:24     ` Ghimiray, Himal Prasad
2025-08-05 10:10       ` Danilo Krummrich
2025-08-05 11:04         ` Ghimiray, Himal Prasad
2025-08-05  9:40   ` Danilo Krummrich
2025-08-05 11:02     ` Ghimiray, Himal Prasad
2025-07-30 13:00 ` [PATCH v5 02/25] drm/gpuvm: Kill drm_gpuva_init() Himal Prasad Ghimiray
2025-08-05  3:45   ` Matthew Brost
2025-08-05  9:35   ` Danilo Krummrich
2025-07-30 13:00 ` [PATCH v5 03/25] drm/gpuvm: Support flags in drm_gpuva_op_map Himal Prasad Ghimiray
2025-08-05  3:58   ` Matthew Brost
2025-08-05 11:05     ` Ghimiray, Himal Prasad
2025-07-30 13:00 ` [PATCH v5 04/25] drm/gpuvm: Introduce DRM_GPUVM_SM_MAP_OPS_FLAG_SPLIT_MADVISE flag Himal Prasad Ghimiray
2025-08-05 19:24   ` Matthew Brost
2025-07-30 13:00 ` [PATCH v5 05/25] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 06/25] drm/xe/vm: Add attributes struct as member of vma Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 07/25] drm/xe/vma: Move pat_index to vma attributes Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 08/25] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 09/25] drm/gpusvm: Make drm_gpusvm_for_each_* macros public Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 10/25] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 11/25] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise Himal Prasad Ghimiray
2025-08-05  4:00   ` Matthew Brost
2025-07-30 13:00 ` [PATCH v5 12/25] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 13/25] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
2025-08-05  4:43   ` Matthew Brost
2025-07-30 13:00 ` [PATCH v5 14/25] drm/xe/svm : Add svm ranges migration policy on atomic access Himal Prasad Ghimiray
2025-08-05 20:03   ` Matthew Brost
2025-08-06  5:30     ` Ghimiray, Himal Prasad
2025-08-05 20:10   ` Matthew Brost
2025-08-06  5:29     ` Ghimiray, Himal Prasad
2025-07-30 13:00 ` [PATCH v5 15/25] drm/xe/madvise: Update migration policy based on preferred location Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 16/25] drm/xe/svm: Support DRM_XE_SVM_MEM_RANGE_ATTR_PAT memory attribute Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 17/25] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 18/25] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 19/25] drm/xe/bo: Add attributes field to xe_bo Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 20/25] drm/xe/bo: Update atomic_access attribute on madvise Himal Prasad Ghimiray
2025-08-05 20:06   ` Matthew Brost
2025-07-30 13:00 ` [PATCH v5 21/25] drm/xe/madvise: Skip vma invalidation if mem attr are unchanged Himal Prasad Ghimiray
2025-07-30 20:57   ` kernel test robot
2025-07-30 13:00 ` [PATCH v5 22/25] drm/xe/vm: Add helper to check for default VMA memory attributes Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 23/25] drm/xe: Reset VMA attributes to default in SVM garbage collector Himal Prasad Ghimiray
2025-08-06  4:06   ` Matthew Brost
2025-08-06  5:32     ` Ghimiray, Himal Prasad
2025-07-30 13:00 ` [PATCH v5 24/25] drm/xe: Enable madvise ioctl for xe Himal Prasad Ghimiray
2025-07-30 13:00 ` [PATCH v5 25/25] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
2025-08-05 19:29   ` Matthew Brost
2025-07-30 14:20 ` ✗ CI.checkpatch: warning for MADVISE FOR XE (rev5) Patchwork
2025-07-30 14:21 ` ✓ CI.KUnit: success " Patchwork
2025-07-30 14:36 ` ✗ CI.checksparse: warning " Patchwork
2025-07-30 15:36 ` ✓ Xe.CI.BAT: success " Patchwork
2025-07-30 17:51 ` ✗ Xe.CI.Full: failure " Patchwork
