From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: Matthew Brost <matthew.brost@intel.com>,
	intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: himal.prasad.ghimiray@intel.com, apopple@nvidia.com,
	airlied@gmail.com,  simona.vetter@ffwll.ch,
	felix.kuehling@amd.com, dakr@kernel.org
Subject: Re: [PATCH v4 16/33] drm/xe: Add unbind to SVM garbage collector
Date: Fri, 07 Feb 2025 13:55:58 +0100
Message-ID: <912b7cd94d00d84525d58bb3cebc7b82ad0129b2.camel@linux.intel.com>
In-Reply-To: <20250129195212.745731-17-matthew.brost@intel.com>

On Wed, 2025-01-29 at 11:51 -0800, Matthew Brost wrote:
> Add unbind to SVM garbage collector. To facilitate add unbind support
> function to VM layer which unbinds a SVM range. Also teach PY layer

Should it be
s/PY layer/the PT layer/ ?

Also see below regarding accessors,

Thanks,
Thomas


> to
> understand unbinds of SVM ranges.
> 
> v3:
>  - s/INVALID_VMA/XE_INVALID_VMA (Thomas)
>  - Kernel doc (Thomas)
>  - New GPU SVM range structure (Thomas)
>  - s/DRM_GPUVA_OP_USER/DRM_GPUVA_OP_DRIVER (Thomas)
> v4:
>  - Use xe_vma_op_unmap_range (Himal)
> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_pt.c       | 84 ++++++++++++++++++++++++++------
>  drivers/gpu/drm/xe/xe_svm.c      |  9 +++-
>  drivers/gpu/drm/xe/xe_vm.c       | 83 +++++++++++++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_vm.h       |  2 +
>  drivers/gpu/drm/xe/xe_vm_types.h | 12 ++++-
>  5 files changed, 172 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index cb63596dbfbf..f8d06c70f77d 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -957,10 +957,16 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
>  	}
>  }
>  
> +#define XE_INVALID_VMA	((struct xe_vma *)(0xdeaddeadull))
> +
>  static void xe_pt_commit_locks_assert(struct xe_vma *vma)
>  {
> -	struct xe_vm *vm = xe_vma_vm(vma);
> +	struct xe_vm *vm;
>  
> +	if (vma == XE_INVALID_VMA)
> +		return;
> +
> +	vm = xe_vma_vm(vma);
>  	lockdep_assert_held(&vm->lock);
>  
>  	if (!xe_vma_has_no_bo(vma))
> @@ -986,7 +992,8 @@ static void xe_pt_commit(struct xe_vma *vma,
>  		for (j = 0; j < entries[i].qwords; j++) {
>  			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
> 
> -			xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
> +			xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 0 :
> +				      xe_vma_vm(vma)->flags, deferred);
>  		}
>  	}
>  }
> @@ -1419,6 +1426,9 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
>  	list_for_each_entry(op, &vops->list, link) {
>  		struct xe_svm_range *range = op->map_range.range;
>  
> +		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
> +			continue;
> +
>  		xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
>  		xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
>  
> @@ -1616,7 +1626,9 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
>   * xe_pt_stage_unbind() - Build page-table update structures for an unbind
>   * operation
>   * @tile: The tile we're unbinding for.
> + * @vm: The vm
>   * @vma: The vma we're unbinding.
> + * @range: The range we're unbinding.
>   * @entries: Caller-provided storage for the update structures.
>   *
>   * Builds page-table update structures for an unbind operation. The function
> @@ -1626,9 +1638,14 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
>   *
>   * Return: The number of entries used.
>   */
> -static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
> +static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
> +				       struct xe_vm *vm,
> +				       struct xe_vma *vma,
> +				       struct xe_svm_range *range,
>  				       struct xe_vm_pgtable_update *entries)
>  {
> +	u64 start = range ? range->base.itree.start : xe_vma_start(vma);
> +	u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);

Perhaps a code-wide comment is in order here: use accessors

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range);
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range);

to avoid open-coding range->base.itree.xxxx, which is pretty frequent in
the code.
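
A minimal sketch of what such accessors could look like (the names and
placement, e.g. in xe_svm.h, are suggestions; deriving the exclusive end
from itree.last + 1 follows the open-coded pattern above):

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	/* Inclusive start address, taken from the interval tree node. */
	return range->base.itree.start;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	/* Exclusive end address; itree.last is the last byte covered. */
	return range->base.itree.last + 1;
}

The open-coded sites above would then read e.g.

	u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma);
	u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma);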


>  	struct xe_pt_stage_unbind_walk xe_walk = {
>  		.base = {
>  			.ops = &xe_pt_stage_unbind_ops,
> @@ -1636,14 +1653,14 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
>  			.max_level = XE_PT_HIGHEST_LEVEL,
>  		},
>  		.tile = tile,
> -		.modified_start = xe_vma_start(vma),
> -		.modified_end = xe_vma_end(vma),
> +		.modified_start = start,
> +		.modified_end = end,
>  		.wupd.entries = entries,
>  	};
> -	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
> +	struct xe_pt *pt = vm->pt_root[tile->id];
>  
> -	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
> -				xe_vma_end(vma), &xe_walk.base);
> +	(void)xe_pt_walk_shared(&pt->base, pt->level, start, end,
> +				&xe_walk.base);
>  
>  	return xe_walk.wupd.num_used_entries;
>  }
> @@ -1885,13 +1902,6 @@ static int unbind_op_prepare(struct xe_tile *tile,
>  	       "Preparing unbind, with range [%llx...%llx)\n",
>  	       xe_vma_start(vma), xe_vma_end(vma) - 1);
>  
> -	/*
> -	 * Wait for invalidation to complete. Can corrupt internal page table
> -	 * state if an invalidation is running while preparing an unbind.
> -	 */
> -	if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
> -		mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
> -
>  	pt_op->vma = vma;
>  	pt_op->bind = false;
>  	pt_op->rebind = false;
> @@ -1900,7 +1910,8 @@ static int unbind_op_prepare(struct xe_tile *tile,
>  	if (err)
>  		return err;
>  
> -	pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
> +	pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma),
> +						vma, NULL, pt_op->entries);
>  
>  	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
>  				pt_op->num_entries, false);
> @@ -1915,6 +1926,42 @@ static int unbind_op_prepare(struct xe_tile *tile,
>  	return 0;
>  }
>  
> +static int unbind_range_prepare(struct xe_vm *vm,
> +				struct xe_tile *tile,
> +				struct xe_vm_pgtable_update_ops *pt_update_ops,
> +				struct xe_svm_range *range)
> +{
> +	u32 current_op = pt_update_ops->current_op;
> +	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
> +
> +	if (!(range->tile_present & BIT(tile->id)))
> +		return 0;
> +
> +	vm_dbg(&vm->xe->drm,
> +	       "Preparing unbind, with range [%lx...%lx)\n",
> +	       range->base.itree.start, range->base.itree.last);
> +
> +	pt_op->vma = XE_INVALID_VMA;
> +	pt_op->bind = false;
> +	pt_op->rebind = false;
> +
> +	pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
> +						pt_op->entries);
> +
> +	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
> +				pt_op->num_entries, false);
> +	xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
> +					 range->base.itree.last + 1);
> +	++pt_update_ops->current_op;
> +	pt_update_ops->needs_svm_lock = true;
> +	pt_update_ops->needs_invalidation = true;
> +
> +	xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
> +				    pt_op->num_entries);
> +
> +	return 0;
> +}
> +
>  static int op_prepare(struct xe_vm *vm,
>  		      struct xe_tile *tile,
>  		      struct xe_vm_pgtable_update_ops *pt_update_ops,
> @@ -1982,6 +2029,9 @@ static int op_prepare(struct xe_vm *vm,
>  			err = bind_range_prepare(vm, tile, pt_update_ops,
>  						 op->map_range.vma,
>  						 op->map_range.range);
> +		} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
> +			err = unbind_range_prepare(vm, tile, pt_update_ops,
> +						   op->unmap_range.range);
>  		}
>  		break;
>  	default:
> @@ -2171,6 +2221,8 @@ static void op_commit(struct xe_vm *vm,
>  		if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
>  			op->map_range.range->tile_present |= BIT(tile->id);
>  			op->map_range.range->tile_invalidated &= ~BIT(tile->id);
> +		} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
> +			op->unmap_range.range->tile_present &= ~BIT(tile->id);
>  		}
>  		break;
>  	}
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 3788196b2925..03c5cbcacb0e 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -216,7 +216,14 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
>  static int __xe_svm_garbage_collector(struct xe_vm *vm,
>  				      struct xe_svm_range *range)
>  {
> -	/* TODO: Do unbind */
> +	struct dma_fence *fence;
> +
> +	xe_vm_lock(vm, false);
> +	fence = xe_vm_range_unbind(vm, range);
> +	xe_vm_unlock(vm);
> +	if (IS_ERR(fence))
> +		return PTR_ERR(fence);
> +	dma_fence_put(fence);
>  
>  	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
>  
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index bdc9b75e0aee..6fa446884955 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -984,6 +984,89 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
>  	return fence;
>  }
>  
> +static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
> +					struct xe_svm_range *range)
> +{
> +	INIT_LIST_HEAD(&op->link);
> +	op->tile_mask = range->tile_present;
> +	op->base.op = DRM_GPUVA_OP_DRIVER;
> +	op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
> +	op->unmap_range.range = range;
> +}
> +
> +static int
> +xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
> +			   struct xe_svm_range *range)
> +{
> +	struct xe_vma_op *op;
> +
> +	op = kzalloc(sizeof(*op), GFP_KERNEL);
> +	if (!op)
> +		return -ENOMEM;
> +
> +	xe_vm_populate_range_unbind(op, range);
> +	list_add_tail(&op->link, &vops->list);
> +	xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
> +
> +	return 0;
> +}
> +
> +/**
> + * xe_vm_range_unbind() - VM range unbind
> + * @vm: The VM which the range belongs to.
> + * @range: SVM range to unbind.
> + *
> + * Unbind SVM range removing the GPU page tables for the range.
> + *
> + * Return: dma fence for unbind to signal completion on success, ERR_PTR on
> + * failure
> + */
> +struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
> +				     struct xe_svm_range *range)
> +{
> +	struct dma_fence *fence = NULL;
> +	struct xe_vma_ops vops;
> +	struct xe_vma_op *op, *next_op;
> +	struct xe_tile *tile;
> +	u8 id;
> +	int err;
> +
> +	lockdep_assert_held(&vm->lock);
> +	xe_vm_assert_held(vm);
> +	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
> +
> +	if (!range->tile_present)
> +		return dma_fence_get_stub();
> +
> +	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
> +	for_each_tile(tile, vm->xe, id) {
> +		vops.pt_update_ops[id].wait_vm_bookkeep = true;
> +		vops.pt_update_ops[tile->id].q =
> +			xe_tile_migrate_exec_queue(tile);
> +	}
> +
> +	err = xe_vm_ops_add_range_unbind(&vops, range);
> +	if (err)
> +		return ERR_PTR(err);
> +
> +	err = xe_vma_ops_alloc(&vops, false);
> +	if (err) {
> +		fence = ERR_PTR(err);
> +		goto free_ops;
> +	}
> +
> +	fence = ops_execute(vm, &vops);
> +
> +free_ops:
> +	list_for_each_entry_safe(op, next_op, &vops.list, link) {
> +		list_del(&op->link);
> +		kfree(op);
> +	}
> +	xe_vma_ops_fini(&vops);
> +
> +	return fence;
> +}
> +
>  static void xe_vma_free(struct xe_vma *vma)
>  {
>  	if (xe_vma_is_userptr(vma))
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index a82fe743bbe0..3b6316dd9fd6 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -221,6 +221,8 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
>  				     struct xe_vma *vma,
>  				     struct xe_svm_range *range,
>  				     u8 tile_mask);
> +struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
> +				     struct xe_svm_range *range);
>  
>  int xe_vm_invalidate_vma(struct xe_vma *vma);
>  
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 576316729249..aaba9e5acfb7 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -361,6 +361,12 @@ struct xe_vma_op_map_range {
>  	struct xe_svm_range *range;
>  };
>  
> +/** struct xe_vma_op_unmap_range - VMA unmap range operation */
> +struct xe_vma_op_unmap_range {
> +	/** @range: SVM range to unmap */
> +	struct xe_svm_range *range;
> +};
> +
>  /** enum xe_vma_op_flags - flags for VMA operation */
>  enum xe_vma_op_flags {
>  	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
> @@ -375,6 +381,8 @@ enum xe_vma_op_flags {
>  enum xe_vma_subop {
>  	/** @XE_VMA_SUBOP_MAP_RANGE: Map range */
>  	XE_VMA_SUBOP_MAP_RANGE,
> +	/** @XE_VMA_SUBOP_UNMAP_RANGE: Unmap range */
> +	XE_VMA_SUBOP_UNMAP_RANGE,
>  };
>  
>  /** struct xe_vma_op - VMA operation */
> @@ -397,8 +405,10 @@ struct xe_vma_op {
>  		struct xe_vma_op_remap remap;
>  		/** @prefetch: VMA prefetch operation specific data */
>  		struct xe_vma_op_prefetch prefetch;
> -		/** @map: VMA map range operation specific data */
> +		/** @map_range: VMA map range operation specific data */
>  		struct xe_vma_op_map_range map_range;
> +		/** @unmap_range: VMA unmap range operation specific data */
> +		struct xe_vma_op_unmap_range unmap_range;
>  	};
>  };
>  

