From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: <intel-xe@lists.freedesktop.org>, <thomas.hellstrom@linux.intel.com>
Subject: Re: [PATCH v2 23/32] drm/xe: Implement madvise ioctl for xe
Date: Wed, 14 May 2025 14:41:24 -0700
Message-ID: <aCUOBPal7m8PF67H@lstrano-desk.jf.intel.com>
In-Reply-To: <20250407101719.3350996-24-himal.prasad.ghimiray@intel.com>

On Mon, Apr 07, 2025 at 03:47:10PM +0530, Himal Prasad Ghimiray wrote:
> This driver-specific ioctl enables UMDs to control the memory attributes
> for GPU VMAs within a specified input range. If the start or end
> addresses fall within an existing VMA, the VMA is split accordingly. The
> attributes of the VMA are modified as provided by the users. The old
> mappings of the VMAs are invalidated, and TLB invalidation is performed
> if necessary.
> 
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
>  drivers/gpu/drm/xe/Makefile        |   1 +
>  drivers/gpu/drm/xe/xe_device.c     |   2 +
>  drivers/gpu/drm/xe/xe_vm_madvise.c | 309 +++++++++++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_vm_madvise.h |  15 ++
>  4 files changed, 327 insertions(+)
>  create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
>  create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
> 
> diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> index e4fec90bab55..3e83ae8b9dc1 100644
> --- a/drivers/gpu/drm/xe/Makefile
> +++ b/drivers/gpu/drm/xe/Makefile
> @@ -117,6 +117,7 @@ xe-y += xe_bb.o \
>  	xe_uc.o \
>  	xe_uc_fw.o \
>  	xe_vm.o \
> +	xe_vm_madvise.o \
>  	xe_vram.o \
>  	xe_vram_freq.o \
>  	xe_vsec.o \
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index d8e227ddf255..3e57300014bf 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -60,6 +60,7 @@
>  #include "xe_ttm_stolen_mgr.h"
>  #include "xe_ttm_sys_mgr.h"
>  #include "xe_vm.h"
> +#include "xe_vm_madvise.h"
>  #include "xe_vram.h"
>  #include "xe_vsec.h"
>  #include "xe_wait_user_fence.h"
> @@ -196,6 +197,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
>  	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
>  			  DRM_RENDER_ALLOW),
>  	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
>  };
>  
>  static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> new file mode 100644
> index 000000000000..ef50031649e0
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -0,0 +1,309 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#include "xe_vm_madvise.h"
> +
> +#include <linux/nospec.h>
> +#include <drm/ttm/ttm_tt.h>
> +#include <drm/xe_drm.h>
> +
> +#include "xe_bo.h"
> +#include "xe_gt_tlb_invalidation.h"
> +#include "xe_pt.h"
> +#include "xe_svm.h"
> +
> +static struct xe_vma **get_vmas(struct xe_vm *vm, int *num_vmas,
> +				u64 addr, u64 range)
> +{
> +	struct xe_vma **vmas, **__vmas;
> +	struct drm_gpuva *gpuva;
> +	int max_vmas = 8;
> +
> +	lockdep_assert_held(&vm->lock);

lockdep_assert_held_write

> +
> +	*num_vmas = 0;
> +	vmas = kmalloc_array(max_vmas, sizeof(*vmas), GFP_KERNEL);
> +	if (!vmas)
> +		return NULL;
> +
> +	vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
> +
> +	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
> +		struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> +		if (*num_vmas == max_vmas) {
> +			max_vmas <<= 1;
> +			__vmas = krealloc(vmas, max_vmas * sizeof(*vmas), GFP_KERNEL);
> +			if (!__vmas) {
> +				kfree(vmas);
> +				return NULL;
> +			}
> +			vmas = __vmas;
> +		}
> +
> +		vmas[*num_vmas] = vma;
> +		(*num_vmas)++;
> +	}
> +
> +	vm_dbg(&vm->xe->drm, "*num_vmas = %d\n", *num_vmas);
> +
> +	if (!*num_vmas) {
> +		kfree(vmas);
> +		return NULL;
> +	}
> +
> +	return vmas;
> +}
> +
> +static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> +				     struct xe_vma **vmas, int num_vmas,
> +				     struct drm_xe_madvise_ops ops)
> +{
> +	/* Implementation pending */
> +	return 0;
> +}
> +
> +static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> +			  struct xe_vma **vmas, int num_vmas,
> +			  struct drm_xe_madvise_ops ops)
> +{
> +	/* Implementation pending */
> +	return 0;
> +}
> +
> +static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> +			     struct xe_vma **vmas, int num_vmas,
> +			     struct drm_xe_madvise_ops ops)
> +{
> +	/* Implementation pending */
> +	return 0;
> +}
> +
> +static int madvise_purgeable_state(struct xe_device *xe, struct xe_vm *vm,
> +				   struct xe_vma **vmas, int num_vmas,
> +				   struct drm_xe_madvise_ops ops)
> +{
> +	/* Implementation pending */
> +	return 0;
> +}
> +
> +typedef int (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
> +			    struct xe_vma **vmas, int num_vmas, struct drm_xe_madvise_ops ops);
> +
> +static const madvise_func madvise_funcs[] = {
> +	[DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
> +	[DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
> +	[DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
> +	[DRM_XE_VMA_ATTR_PURGEABLE_STATE] = madvise_purgeable_state,
> +};
> +
> +static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
> +{
> +	struct drm_gpusvm_notifier *notifier;
> +	struct drm_gpuva *gpuva;
> +	struct xe_svm_range *range;
> +	struct xe_tile *tile;
> +	u64 adj_start, adj_end;
> +	u8 id;
> +
> +	lockdep_assert_held(&vm->lock);

lockdep_assert_held_write

> +

Maybe add a comment here:

	/* Waiting on pending binds */

> +	if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
> +				  false, MAX_SCHEDULE_TIMEOUT) <= 0)
> +		XE_WARN_ON(1);
> +
> +	down_write(&vm->svm.gpusvm.notifier_lock);
> +

xe_svm_notifier_lock

> +	drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
> +		struct drm_gpusvm_range *r = NULL;
> +
> +		adj_start = max(start, notifier->itree.start);
> +		adj_end = min(end, notifier->itree.last + 1);
> +		drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
> +			range = to_xe_range(r);
> +			for_each_tile(tile, vm->xe, id) {
> +				if (xe_pt_zap_ptes_range(tile, vm, range)) {
> +					*tile_mask |= BIT(id);
> +					range->tile_invalidated |= BIT(id);
> +				}
> +			}
> +		}
> +	}
> +
> +	up_write(&vm->svm.gpusvm.notifier_lock);
> +

xe_svm_notifier_unlock

> +	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> +		struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> +		if (xe_vma_is_cpu_addr_mirror(vma))
> +			continue;
> +
> +		if (xe_vma_is_userptr(vma)) {
> +			WARN_ON_ONCE(!mmu_interval_check_retry
> +				    (&to_userptr_vma(vma)->userptr.notifier,
> +				     to_userptr_vma(vma)->userptr.notifier_seq));
> +
> +			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
> +							     DMA_RESV_USAGE_BOOKKEEP));
> +		}
> +
> +		if (xe_vma_bo(vma))
> +			xe_bo_lock(xe_vma_bo(vma), false);
> +

Do you need the BO's dma-resv lock here? I don't think you do. Maybe
double-check with Thomas on this one, as I could be forgetting something.

> +		for_each_tile(tile, vm->xe, id) {
> +			if (xe_pt_zap_ptes(tile, vma))
> +				*tile_mask |= BIT(id);
> +		}
> +
> +		if (xe_vma_bo(vma))
> +			xe_bo_unlock(xe_vma_bo(vma));
> +	}
> +}
> +
> +static void xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> +{
> +	struct xe_gt_tlb_invalidation_fence
> +		fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> +	struct xe_tile *tile;
> +	u32 fence_id = 0;
> +	u8 tile_mask = 0;
> +	u8 id;
> +
> +	xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
> +	if (!tile_mask)
> +		return;
> +
> +	xe_device_wmb(vm->xe);
> +

We have the below pattern in a few places in the driver. I wonder if it's
time for a helper?
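
Rough sketch of what I have in mind (untested, helper name made up), just
factoring out the per-tile loop below:

	/*
	 * Invalidate [start, end) in the TLBs of every GT on the tiles
	 * selected by tile_mask and wait for the invalidation fences.
	 */
	void xe_vm_range_tlb_invalidation(struct xe_vm *vm, u64 start, u64 end,
					  u8 tile_mask);

with the body being the for_each_tile() loop below moved over verbatim, so
this call site (and the similar ones elsewhere in the driver) becomes:

	xe_vm_range_tlb_invalidation(vm, start, end, tile_mask);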

> +	for_each_tile(tile, vm->xe, id) {
> +		if (tile_mask & BIT(id)) {
> +			int err;
> +
> +			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> +							  &fence[fence_id], true);
> +
> +			err = xe_gt_tlb_invalidation_range(tile->primary_gt,
> +							   &fence[fence_id],
> +							   start,
> +							   end,
> +							   vm->usm.asid);
> +			if (WARN_ON_ONCE(err < 0))
> +				goto wait;
> +			++fence_id;
> +
> +			if (!tile->media_gt)
> +				continue;
> +
> +			xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> +							  &fence[fence_id], true);
> +
> +			err = xe_gt_tlb_invalidation_range(tile->media_gt,
> +							   &fence[fence_id],
> +							   start,
> +							   end,
> +							   vm->usm.asid);
> +			if (WARN_ON_ONCE(err < 0))
> +				goto wait;
> +			++fence_id;
> +		}
> +	}
> +
> +wait:
> +	for (id = 0; id < fence_id; ++id)
> +		xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> +}
> +
> +static int input_ranges_same(struct drm_xe_madvise_ops *old,
> +			     struct drm_xe_madvise_ops *new)
> +{
> +	return (new->start == old->start && new->range == old->range);
> +}
> +
> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)

Kernel doc.
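
Something along these lines (sketch only, adjust the wording as you see fit):

	/**
	 * xe_vm_madvise_ioctl - Handle MADVISE ioctl for the VM
	 * @dev: DRM device pointer
	 * @data: Pointer to struct drm_xe_madvise
	 * @file: DRM file pointer
	 *
	 * Set memory attributes (advice) on the GPU VMAs covering the user
	 * supplied range(s), splitting VMAs and invalidating stale bindings
	 * as needed.
	 *
	 * Return: 0 on success, negative error code on failure.
	 */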

> +{
> +	struct xe_device *xe = to_xe_device(dev);
> +	struct xe_file *xef = to_xe_file(file);
> +	struct drm_xe_madvise_ops *advs_ops;
> +	struct drm_xe_madvise *args = data;
> +	struct xe_vm *vm;
> +	struct xe_vma **vmas = NULL;
> +	int num_vmas, err = 0;
> +	int i, j, attr_type;
> +
> +	if (XE_IOCTL_DBG(xe, args->num_ops < 1))
> +		return -EINVAL;
> +
> +	vm = xe_vm_lookup(xef, args->vm_id);
> +	if (XE_IOCTL_DBG(xe, !vm))
> +		return -EINVAL;
> +
> +	if (XE_IOCTL_DBG(xe, !xe_vm_in_fault_mode(vm))) {

Do we want to restrict this to fault mode? Maybe check with Mesa if they
see any use cases outside of it.

> +		err = -EINVAL;
> +		goto put_vm;
> +	}
> +
> +	down_write(&vm->lock);
> +
> +	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> +		err = -ENOENT;
> +		goto unlock_vm;
> +	}
> +
> +	if (args->num_ops > 1) {
> +		u64 __user *madvise_user = u64_to_user_ptr(args->vector_of_ops);
> +
> +		advs_ops = kvmalloc_array(args->num_ops, sizeof(struct drm_xe_madvise_ops),
> +					  GFP_KERNEL | __GFP_ACCOUNT |
> +					  __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> +		if (!advs_ops)
> +			return args->num_ops > 1 ? -ENOBUFS : -ENOMEM;
> +
> +		err = __copy_from_user(advs_ops, madvise_user,
> +				       sizeof(struct drm_xe_madvise_ops) *
> +				       args->num_ops);
> +		if (XE_IOCTL_DBG(xe, err)) {
> +			err = -EFAULT;
> +			goto free_advs_ops;
> +		}
> +	} else {
> +		advs_ops = &args->ops;
> +	}
> +
> +	for (i = 0; i < args->num_ops; i++) {
> +		xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
> +
> +		vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
> +		if (!vmas) {
> +			err = -ENOMEM;
> +			goto unlock_vm;
> +		}
> +
> +		attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
> +		err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
> +
> +		kfree(vmas);
> +		vmas = NULL;
> +
> +		if (err)
> +			break;
> +	}
> +
> +	for (i = 0; i < args->num_ops; i++) {
> +		for (j = i + 1; j < args->num_ops; ++j) {
> +			if (input_ranges_same(&advs_ops[j], &advs_ops[i]))
> +				break;
> +		}

The above loop doesn't look like it actually does anything.
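
If the intent is to invalidate each distinct range only once, I think you
need something like this instead (untested):

	for (i = 0; i < args->num_ops; i++) {
		bool dup = false;

		for (j = i + 1; j < args->num_ops; ++j) {
			if (input_ranges_same(&advs_ops[j], &advs_ops[i])) {
				dup = true;
				break;
			}
		}
		/* A later op covers the same range, let it do the invalidation */
		if (dup)
			continue;

		xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
					       advs_ops[i].start + advs_ops[i].range);
	}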

Matt

> +		xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
> +					       advs_ops[i].start + advs_ops[i].range);
> +	}
> +free_advs_ops:
> +	if (args->num_ops > 1)
> +		kvfree(advs_ops);
> +unlock_vm:
> +	up_write(&vm->lock);
> +put_vm:
> +	xe_vm_put(vm);
> +	return err;
> +}
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
> new file mode 100644
> index 000000000000..c5cdd058c322
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
> @@ -0,0 +1,15 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#ifndef _XE_VM_MADVISE_H_
> +#define _XE_VM_MADVISE_H_
> +
> +struct drm_device;
> +struct drm_file;
> +
> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
> +			struct drm_file *file);
> +
> +#endif
> -- 
> 2.34.1
> 


Thread overview: 120+ messages
2025-04-07 10:16 [PATCH v2 00/32] PREFETCH and MADVISE for SVM ranges Himal Prasad Ghimiray
2025-04-07 10:16 ` [PATCH v2 01/32] drm/xe: Introduce xe_vma_op_prefetch_range struct for prefetch of ranges Himal Prasad Ghimiray
2025-04-07 10:16 ` [PATCH v2 02/32] drm/xe: Make xe_svm_alloc_vram public Himal Prasad Ghimiray
2025-04-17  2:50   ` Matthew Brost
2025-04-21  4:06     ` Ghimiray, Himal Prasad
2025-04-07 10:16 ` [PATCH v2 03/32] drm/xe/svm: Helper to add tile masks to svm ranges Himal Prasad Ghimiray
2025-04-07 10:16 ` [PATCH v2 04/32] drm/xe/svm: Make to_xe_range a public function Himal Prasad Ghimiray
2025-04-07 10:16 ` [PATCH v2 05/32] drm/xe/svm: Make xe_svm_range_* end/start/size public Himal Prasad Ghimiray
2025-04-07 10:16 ` [PATCH v2 06/32] drm/xe/vm: Update xe_vma_ops_incr_pt_update_ops to take an increment value Himal Prasad Ghimiray
2025-04-17  0:10   ` Matthew Brost
2025-04-21  4:09     ` Ghimiray, Himal Prasad
2025-04-07 10:16 ` [PATCH v2 07/32] drm/xe/vm: Add an identifier in xe_vma_ops for svm prefetch Himal Prasad Ghimiray
2025-04-17  2:53   ` Matthew Brost
2025-04-07 10:16 ` [PATCH v2 08/32] drm/xe: Rename lookup_vma function to xe_find_vma_by_addr Himal Prasad Ghimiray
2025-04-07 22:42   ` kernel test robot
2025-04-07 10:16 ` [PATCH v2 09/32] drm/xe/svm: Allow unaligned addresses and ranges for prefetch Himal Prasad Ghimiray
2025-04-17  2:53   ` Matthew Brost
2025-04-07 10:16 ` [PATCH v2 10/32] drm/xe/svm: Refactor usage of drm_gpusvm* function in xe_svm Himal Prasad Ghimiray
2025-04-17  2:57   ` Matthew Brost
2025-04-21  4:30     ` Ghimiray, Himal Prasad
2025-04-07 10:16 ` [PATCH v2 11/32] drm/xe/svm: Add function to determine if range needs VRAM migration Himal Prasad Ghimiray
2025-04-17  3:05   ` Matthew Brost
2025-04-21  4:52     ` Ghimiray, Himal Prasad
2025-04-07 10:16 ` [PATCH v2 12/32] drm/gpusvm: Introduce vram_only flag for VRAM allocation Himal Prasad Ghimiray
2025-04-17  3:07   ` Matthew Brost
2025-04-21  4:55     ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 13/32] drm/xe/svm: Incase of atomic access ensure get_pages happens from vram Himal Prasad Ghimiray
2025-04-17  4:19   ` Matthew Brost
2025-04-21  4:58     ` Ghimiray, Himal Prasad
2025-04-21  6:29       ` Ghimiray, Himal Prasad
2025-04-22 15:25         ` Matthew Brost
2025-04-22 15:27       ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 14/32] drm/xe/svm: Implement prefetch support for SVM ranges Himal Prasad Ghimiray
2025-04-17  4:54   ` Matthew Brost
2025-04-24 10:03     ` Ghimiray, Himal Prasad
2025-04-24 23:48   ` Matthew Brost
2025-04-28  6:44     ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 15/32] drm/xe/vm: Add debug prints for SVM range prefetch Himal Prasad Ghimiray
2025-04-17  4:56   ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 16/32] Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops Himal Prasad Ghimiray
2025-04-07 10:30   ` Boris Brezillon
2025-05-26 13:48     ` Ghimiray, Himal Prasad
2025-04-07 22:42   ` kernel test robot
2025-04-07 10:17 ` [PATCH v2 17/32] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
2025-04-17 18:19   ` Souza, Jose
2025-04-17 18:24     ` Souza, Jose
2025-04-22 15:34       ` Matthew Brost
2025-04-22 15:55         ` Souza, Jose
2025-04-22 16:19           ` Matthew Brost
2025-04-22 15:40     ` Matthew Brost
2025-04-22 16:02       ` Souza, Jose
2025-04-22 16:12         ` Matthew Brost
2025-04-22 16:16           ` Souza, Jose
2025-05-02 14:00   ` Thomas Hellström
2025-05-20  8:13     ` Ghimiray, Himal Prasad
2025-05-20  8:49     ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 18/32] drm/xe/vm: Add attributes struct as member of vma Himal Prasad Ghimiray
2025-05-14 18:36   ` Matthew Brost
2025-05-20  9:27     ` Ghimiray, Himal Prasad
2025-05-27 17:37       ` Matthew Brost
2025-05-28  5:33         ` Ghimiray, Himal Prasad
2025-05-28 16:09           ` Matthew Brost
2025-05-28 16:16             ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 19/32] drm/xe/vma: Move pat_index to vma attributes Himal Prasad Ghimiray
2025-05-14 18:37   ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 20/32] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
2025-05-13  2:36   ` Matthew Brost
2025-05-14 18:40     ` Matthew Brost
2025-05-20  9:28       ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 21/32] drm/gpusvm: Make drm_gpusvm_for_each_* macros public Himal Prasad Ghimiray
2025-04-08  1:49   ` kernel test robot
2025-05-14 18:47   ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 22/32] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
2025-05-14 19:01   ` Matthew Brost
2025-05-20  9:46     ` Ghimiray, Himal Prasad
2025-05-14 19:02   ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 23/32] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
2025-05-14 21:41   ` Matthew Brost [this message]
2025-05-20 10:15     ` Ghimiray, Himal Prasad
2025-05-28  5:22       ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 24/32] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise Himal Prasad Ghimiray
2025-05-14 19:20   ` Matthew Brost
2025-05-20 10:21     ` Ghimiray, Himal Prasad
2025-05-27 17:32       ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 25/32] drm/xe/svm : Add svm ranges migration policy on atomic access Himal Prasad Ghimiray
2025-05-14 22:21   ` Matthew Brost
2025-05-20 10:22     ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 26/32] drm/xe/madvise: Update migration policy based on preferred location Himal Prasad Ghimiray
2025-05-14 22:04   ` Matthew Brost
2025-05-21  8:50     ` Ghimiray, Himal Prasad
2025-05-21 16:51       ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 27/32] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute Himal Prasad Ghimiray
2025-05-14 21:52   ` Matthew Brost
2025-05-21  8:51     ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 28/32] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch Himal Prasad Ghimiray
2025-05-14 21:05   ` Matthew Brost
2025-05-21  8:52     ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 29/32] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
2025-05-14 22:17   ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 30/32] drm/xe/uapi: Add uapi for vma count and mem attributes Himal Prasad Ghimiray
2025-05-14 21:08   ` Matthew Brost
2025-05-21  8:54     ` Ghimiray, Himal Prasad
2025-05-28 16:18       ` Ghimiray, Himal Prasad
2025-04-07 10:17 ` [PATCH v2 31/32] drm/xe/bo: Add attributes field to xe_bo Himal Prasad Ghimiray
2025-05-14 21:10   ` Matthew Brost
2025-04-07 10:17 ` [PATCH v2 32/32] drm/xe/bo: Update atomic_access attribute on madvise Himal Prasad Ghimiray
2025-05-14 22:31   ` Matthew Brost
2025-05-21  9:13     ` Ghimiray, Himal Prasad
2025-04-07 14:07 ` ✓ CI.Patch_applied: success for PREFETCH and MADVISE for SVM ranges (rev3) Patchwork
2025-04-07 14:07 ` ✗ CI.checkpatch: warning " Patchwork
2025-04-07 14:09 ` ✓ CI.KUnit: success " Patchwork
2025-04-07 14:12 ` ✗ CI.Build: failure " Patchwork
2025-04-09  5:11 ` ✓ CI.Patch_applied: success for PREFETCH and MADVISE for SVM ranges (rev4) Patchwork
2025-04-09  5:11 ` ✗ CI.checkpatch: warning " Patchwork
2025-04-09  5:12 ` ✓ CI.KUnit: success " Patchwork
2025-04-09  5:29 ` ✓ CI.Build: " Patchwork
2025-04-09  5:31 ` ✗ CI.Hooks: failure " Patchwork
2025-04-09  5:32 ` ✗ CI.checksparse: warning " Patchwork
2025-04-09  5:52 ` ✓ Xe.CI.BAT: success " Patchwork
2025-04-09  7:00 ` ✗ Xe.CI.Full: failure " Patchwork
