Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: "Lin, Shuicheng" <shuicheng.lin@intel.com>
Cc: "Ghimiray, Himal Prasad" <himal.prasad.ghimiray@intel.com>,
	"intel-xe@lists.freedesktop.org" <intel-xe@lists.freedesktop.org>,
	"thomas.hellstrom@linux.intel.com"
	<thomas.hellstrom@linux.intel.com>
Subject: Re: [PATCH v4 09/20] drm/xe: Implement madvise ioctl for xe
Date: Wed, 25 Jun 2025 23:15:24 -0700	[thread overview]
Message-ID: <aFzlfMdVOAAI+zO4@lstrano-desk.jf.intel.com> (raw)
In-Reply-To: <DM4PR11MB5456041A620E64B89F40BB9EEA7AA@DM4PR11MB5456.namprd11.prod.outlook.com>

On Thu, Jun 26, 2025 at 12:04:07AM -0600, Lin, Shuicheng wrote:
> On Fri, June 13, 2025 8:56 PM Himal Prasad Ghimiray wrote:
> > This driver-specific ioctl enables UMDs to control the memory attributes for GPU
> > VMAs within a specified input range. If the start or end addresses fall within an
> > existing VMA, the VMA is split accordingly. The attributes of the VMA are
> > modified as provided by the users. The old mappings of the VMAs are invalidated,
> > and TLB invalidation is performed if necessary.
> > 
> > v2 (Matthew Brost)
> > - xe_vm_in_fault_mode can't be enabled by Mesa, hence allow ioctl in non fault
> > mode too
> > - fix tlb invalidation skip for same ranges in multiple op
> > - use helper for tlb invalidation
> > - use xe_svm_notifier_lock/unlock helper
> > - s/lockdep_assert_held/lockdep_assert_held_write
> > - Add kernel-doc
> > 
> > v3(Matthew Brost)
> > - make vfunc fail safe
> > - Add sanitizing input args before vfunc
> > 
> > Cc: Matthew Brost <matthew.brost@intel.com>
> > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > ---
> >  drivers/gpu/drm/xe/Makefile        |   1 +
> >  drivers/gpu/drm/xe/xe_device.c     |   2 +
> >  drivers/gpu/drm/xe/xe_vm_madvise.c | 282 +++++++++++++++++++++++++++++
> >  drivers/gpu/drm/xe/xe_vm_madvise.h |  15 ++
> >  4 files changed, 300 insertions(+)
> >  create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
> >  create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
> > 
> > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > index f5f5775acdc0..d375b549c30f 100644
> > --- a/drivers/gpu/drm/xe/Makefile
> > +++ b/drivers/gpu/drm/xe/Makefile
> > @@ -117,6 +117,7 @@ xe-y += xe_bb.o \
> >  	xe_uc.o \
> >  	xe_uc_fw.o \
> >  	xe_vm.o \
> > +	xe_vm_madvise.o \
> >  	xe_vram.o \
> >  	xe_vram_freq.o \
> >  	xe_vsec.o \
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index 7d9a31868ea9..632d3ab12392 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -61,6 +61,7 @@
> >  #include "xe_ttm_stolen_mgr.h"
> >  #include "xe_ttm_sys_mgr.h"
> >  #include "xe_vm.h"
> > +#include "xe_vm_madvise.h"
> >  #include "xe_vram.h"
> >  #include "xe_vsec.h"
> >  #include "xe_wait_user_fence.h"
> > @@ -197,6 +198,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> >  	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
> >  			  DRM_RENDER_ALLOW),
> >  	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> > +	DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
> >  };
> > 
> >  static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > new file mode 100644
> > index 000000000000..ff560914ad7e
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > @@ -0,0 +1,282 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright © 2025 Intel Corporation
> > + */
> > +
> > +#include "xe_vm_madvise.h"
> > +
> > +#include <linux/nospec.h>
> > +#include <drm/ttm/ttm_tt.h>
> > +#include <drm/xe_drm.h>
> > +
> > +#include "xe_bo.h"
> > +#include "xe_gt_tlb_invalidation.h"
> > +#include "xe_pt.h"
> > +#include "xe_svm.h"
> > +
> > +struct xe_vmas_in_madvise_range {
> > +	u64 addr;
> > +	u64 range;
> > +	struct xe_vma **vmas;
> > +	int num_vmas;
> > +	bool has_svm_vmas;
> > +	bool has_bo_vmas;
> > +	bool has_userptr_vmas;
> > +};
> > +
> > +static int get_vmas(struct xe_vm *vm, struct xe_vmas_in_madvise_range *madvise_range)
> > +{
> > +	u64 addr = madvise_range->addr;
> > +	u64 range = madvise_range->range;
> > +
> > +	struct xe_vma  **__vmas;
> > +	struct drm_gpuva *gpuva;
> > +	int max_vmas = 8;
> > +
> > +	lockdep_assert_held(&vm->lock);
> > +
> > +	madvise_range->num_vmas = 0;
> > +	madvise_range->vmas = kmalloc_array(max_vmas, sizeof(*madvise_range->vmas), GFP_KERNEL);
> > +	if (!madvise_range->vmas)
> > +		return -ENOMEM;
> > +
> > +	vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
> > +
> > +	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
> > +		struct xe_vma *vma = gpuva_to_vma(gpuva);
> > +
> > +		if (xe_vma_bo(vma))
> > +			madvise_range->has_bo_vmas = true;
> > +		else if (xe_vma_is_cpu_addr_mirror(vma))
> > +			madvise_range->has_svm_vmas = true;
> > +		else if (xe_vma_is_userptr(vma))
> > +			madvise_range->has_userptr_vmas = true;
> > +		else
> > +			XE_WARN_ON("UNEXPECTED VMA");
> > +
> > +		if (madvise_range->num_vmas == max_vmas) {
> > +			max_vmas <<= 1;
> > +			__vmas = krealloc(madvise_range->vmas,
> > +					  max_vmas * sizeof(*madvise_range->vmas),
> > +					  GFP_KERNEL);
> > +			if (!__vmas) {
> > +				kfree(madvise_range->vmas);
> > +				return -ENOMEM;
> > +			}
> > +			madvise_range->vmas = __vmas;
> > +		}
> > +
> > +		madvise_range->vmas[madvise_range->num_vmas] = vma;
> > +		(madvise_range->num_vmas)++;
> > +	}
> > +
> > +	if (!madvise_range->num_vmas)
> > +		kfree(madvise_range->vmas);
> > +
> > +	vm_dbg(&vm->xe->drm, "madvise_range-num_vmas = %d\n", madvise_range->num_vmas);
> > +
> > +	return 0;
> > +}
> > +
> > +static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> > +				      struct xe_vma **vmas, int num_vmas,
> > +				      struct drm_xe_madvise *op)
> > +{
> > +	/* Implementation pending */
> > +}
> > +
> > +static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> > +			   struct xe_vma **vmas, int num_vmas,
> > +			   struct drm_xe_madvise *op)
> > +{
> > +	/* Implementation pending */
> > +}
> > +
> > +static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> > +			      struct xe_vma **vmas, int num_vmas,
> > +			      struct drm_xe_madvise *op)
> > +{
> > +	/* Implementation pending */
> > +}
> > +
> > +typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
> > +			     struct xe_vma **vmas, int num_vmas,
> > +			     struct drm_xe_madvise *op);
> > +
> > +static const madvise_func madvise_funcs[] = {
> > +	[DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
> > +	[DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
> > +	[DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
> > +};
> > +
> > +static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
> > +{
> > +	struct drm_gpuva *gpuva;
> > +	struct xe_tile *tile;
> > +	u8 id;
> > +
> > +	lockdep_assert_held_write(&vm->lock);
> > +
> > +	if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
> > +				  false, MAX_SCHEDULE_TIMEOUT) <= 0)
> > +		XE_WARN_ON(1);
> > +
> > +	*tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
> > +
> > +	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> > +		struct xe_vma *vma = gpuva_to_vma(gpuva);
> > +
> > +		if (xe_vma_is_cpu_addr_mirror(vma))
> > +			continue;
> > +
> > +		if (xe_vma_is_userptr(vma)) {
> > +			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
> > +							     DMA_RESV_USAGE_BOOKKEEP));
> > +		}
> > +
> > +		for_each_tile(tile, vm->xe, id) {
> > +			if (xe_pt_zap_ptes(tile, vma)) {
> > +				*tile_mask |= BIT(id);
> > +				vma->tile_invalidated |= BIT(id);
> > +			}
> > +		}
> > +	}
> > +}
> > +
> > +static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> > +{
> > +	u8 tile_mask = 0;
> > +
> > +	xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
> > +	if (!tile_mask)
> > +		return 0;
> > +
> > +	xe_device_wmb(vm->xe);
> > +
> > +	return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
> > +}
> > +
> > +static int drm_xe_madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madvise *args)
> > +{
> > +	if (XE_IOCTL_DBG(xe, !args))
> > +		return -EINVAL;
> > +
> > +	if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->start, SZ_4K)))
> > +		return -EINVAL;
> > +
> > +	if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->range, SZ_4K)))
> > +		return -EINVAL;
> > +
> > +	if (XE_IOCTL_DBG(xe, args->range < SZ_4K))
> > +		return -EINVAL;
> > +
> > +	switch (args->type) {
> > +	case DRM_XE_VMA_ATTR_ATOMIC:
> > +		if (XE_IOCTL_DBG(xe, args->atomic.val > DRM_XE_VMA_ATOMIC_CPU))
> > +			return -EINVAL;
> > +		break;
> > +	case DRM_XE_VMA_ATTR_PAT:
> > +		/*TODO: Add valid pat check */
> > +		break;
> > +	case DRM_XE_VMA_ATTR_PREFERRED_LOC:
> > +		if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.migration_policy >
> > +				     DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES))
> > +			return -EINVAL;
> > +		break;
> > +	default:
> > +		if (XE_IOCTL_DBG(xe, 1))
> > +			return -EINVAL;
> > +	}
> > +
> > +	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > +		return -EINVAL;
> > +
> > +	return 0;
> > +}
> > +
> > +/**
> > + * xe_vm_madvise_ioctl - Handle MADVISE ioctl for a VM
> > + * @dev: DRM device pointer
> > + * @data: Pointer to ioctl data (drm_xe_madvise*)
> > + * @file: DRM file pointer
> > + *
> > + * Handles the MADVISE ioctl to provide memory advice for VMAs within the
> > + * input range.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +	struct xe_device *xe = to_xe_device(dev);
> > +	struct xe_file *xef = to_xe_file(file);
> > +	struct drm_xe_madvise *args = data;
> > +	struct xe_vm *vm;
> > +	struct xe_bo *bo;
> > +	struct drm_exec exec;
> > +	int err = 0;
> > +	int attr_type;
> > +
> > +	vm = xe_vm_lookup(xef, args->vm_id);
> > +	if (XE_IOCTL_DBG(xe, !vm))
> > +		return -EINVAL;
> > +
> > +	if (drm_xe_madvise_args_are_sane(vm->xe, args))
> > +		return -EINVAL;
> 
> The error return above will miss the "xe_vm_put(vm)".
> BTW, the function name drm_xe_madvise_args_are_sane is a little misleading. It reads like a boolean function, while the return value is 0 for success and an error code for failure.
> 

Agree with Shuicheng: drm_xe_madvise_args_are_sane should return a bool,
and we need to avoid leaking the VM reference on failure.

Also, while I'm here, avoid the drm_* prefix in Xe code. So...

s/drm_xe_madvise_args_are_sane/madvise_args_are_sane
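
Untested sketch of what I mean (the put_vm label name is just
illustrative, and I've elided the checks that stay the same):

static bool madvise_args_are_sane(struct xe_device *xe,
				  const struct drm_xe_madvise *args)
{
	if (XE_IOCTL_DBG(xe, !args))
		return false;

	if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->start, SZ_4K)))
		return false;

	/* ... remaining alignment, type and reserved checks, returning false ... */

	return true;
}

	/* in xe_vm_madvise_ioctl() */
	vm = xe_vm_lookup(xef, args->vm_id);
	if (XE_IOCTL_DBG(xe, !vm))
		return -EINVAL;

	if (!madvise_args_are_sane(vm->xe, args)) {
		err = -EINVAL;
		goto put_vm;
	}
	...
unlock_vm:
	up_write(&vm->lock);
put_vm:
	xe_vm_put(vm);
	return err;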

Matt

> Shuicheng 
> 
> > +
> > +	down_write(&vm->lock);
> > +
> > +	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> > +		err = -ENOENT;
> > +		goto unlock_vm;
> > +	}
> > +
> > +	xe_vm_alloc_madvise_vma(vm, args->start, args->range);
> > +
> > +	struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start,
> > +							  .range = args->range, };
> > +	err = get_vmas(vm, &madvise_range);
> > +	if (err || !madvise_range.num_vmas)
> > +		goto unlock_vm;
> > +
> > +	if (madvise_range.has_bo_vmas) {
> > +		drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
> > +		drm_exec_until_all_locked(&exec) {
> > +			for (int i = 0; i < madvise_range.num_vmas; i++) {
> > +				bo = xe_vma_bo(madvise_range.vmas[i]);
> > +				if (!bo)
> > +					continue;
> > +				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
> > +				drm_exec_retry_on_contention(&exec);
> > +				if (err)
> > +					goto err_fini;
> > +			}
> > +		}
> > +	}
> > +
> > +	if (madvise_range.has_userptr_vmas)
> > +		down_read(&vm->userptr.notifier_lock);
> > +
> > +	if (madvise_range.has_svm_vmas)
> > +		xe_svm_notifier_lock(vm);
> > +
> > +	attr_type = array_index_nospec(args->type, ARRAY_SIZE(madvise_funcs));
> > +	madvise_funcs[attr_type](xe, vm, madvise_range.vmas, madvise_range.num_vmas, args);
> > +
> > +	kfree(madvise_range.vmas);
> > +	madvise_range.vmas = NULL;
> > +
> > +	err = xe_vm_invalidate_madvise_range(vm, args->start, args->start + args->range);
> > +
> > +	if (madvise_range.has_svm_vmas)
> > +		xe_svm_notifier_unlock(vm);
> > +
> > +	if (madvise_range.has_userptr_vmas)
> > +		up_read(&vm->userptr.notifier_lock);
> > +err_fini:
> > +	if (madvise_range.has_bo_vmas)
> > +		drm_exec_fini(&exec);
> > +unlock_vm:
> > +	up_write(&vm->lock);
> > +	xe_vm_put(vm);
> > +	return err;
> > +}
> > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
> > new file mode 100644
> > index 000000000000..b0e1fc445f23
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
> > @@ -0,0 +1,15 @@
> > +/* SPDX-License-Identifier: MIT */
> > +/*
> > + * Copyright © 2025 Intel Corporation
> > + */
> > +
> > +#ifndef _XE_VM_MADVISE_H_
> > +#define _XE_VM_MADVISE_H_
> > +
> > +struct drm_device;
> > +struct drm_file;
> > +
> > +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
> > +			struct drm_file *file);
> > +
> > +#endif
> > --
> > 2.34.1
> 


Thread overview: 68+ messages
2025-06-13 12:55 [PATCH v4 00/20] MADVISE FOR XE Himal Prasad Ghimiray
2025-06-13 12:43 ` ✗ CI.checkpatch: warning for MADVISE FOR XE (rev2) Patchwork
2025-06-13 12:44 ` ✗ CI.KUnit: failure " Patchwork
2025-06-13 12:55 ` [PATCH v4 01/20] Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 02/20] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
2025-06-13 14:15   ` Souza, Jose
2025-06-23  4:30   ` Matthew Brost
2025-06-23  6:20     ` Ghimiray, Himal Prasad
2025-06-27 13:47       ` Thomas Hellström
2025-06-27 14:29     ` Thomas Hellström
2025-06-27 18:13       ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 03/20] drm/xe/vm: Add attributes struct as member of vma Himal Prasad Ghimiray
2025-06-23  4:18   ` Matthew Brost
2025-06-23  6:21     ` Ghimiray, Himal Prasad
2025-06-27 14:32     ` Thomas Hellström
2025-06-13 12:55 ` [PATCH v4 04/20] drm/xe/vma: Move pat_index to vma attributes Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 05/20] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
2025-06-23  4:38   ` Matthew Brost
2025-06-23 16:21     ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 06/20] drm/gpusvm: Make drm_gpusvm_for_each_* macros public Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 07/20] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 08/20] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping Himal Prasad Ghimiray
2025-06-23  4:56   ` Matthew Brost
2025-06-23  6:25     ` Ghimiray, Himal Prasad
2025-06-13 12:55 ` [PATCH v4 09/20] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
2025-06-23  5:33   ` Matthew Brost
2025-06-26  6:04   ` Lin, Shuicheng
2025-06-26  6:15     ` Matthew Brost [this message]
2025-06-26  8:36       ` Ghimiray, Himal Prasad
2025-06-26  8:34     ` Ghimiray, Himal Prasad
2025-06-13 12:55 ` [PATCH v4 10/20] drm/xe/vm: Add an identifier for madvise in xe_vma_ops Himal Prasad Ghimiray
2025-06-23  5:38   ` Matthew Brost
2025-06-23  6:28     ` Ghimiray, Himal Prasad
2025-06-13 12:55 ` [PATCH v4 11/20] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise Himal Prasad Ghimiray
2025-06-14  4:31   ` kernel test robot
2025-06-23  5:52   ` Matthew Brost
2025-06-23  6:18     ` Ghimiray, Himal Prasad
2025-06-23 11:45       ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 12/20] drm/xe/svm : Add svm ranges migration policy on atomic access Himal Prasad Ghimiray
2025-06-23 16:32   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 13/20] drm/xe/madvise: Update migration policy based on preferred location Himal Prasad Ghimiray
2025-06-13 23:31   ` kernel test robot
2025-06-14  5:33   ` kernel test robot
2025-06-13 12:55 ` [PATCH v4 14/20] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute Himal Prasad Ghimiray
2025-06-23 16:34   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 15/20] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch Himal Prasad Ghimiray
2025-06-23 16:36   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 16/20] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
2025-06-23 22:07   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 17/20] drm/xe/bo: Add attributes field to xe_bo Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 18/20] drm/xe/bo: Update atomic_access attribute on madvise Himal Prasad Ghimiray
2025-06-23 16:19   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 19/20] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
2025-06-23 22:43   ` Matthew Brost
2025-06-24  2:18   ` Matthew Brost
2025-06-27 13:20     ` Thomas Hellström
2025-06-27 13:43     ` Thomas Hellström
2025-06-26  3:44   ` Lin, Shuicheng
2025-06-13 12:55 ` [PATCH v4 20/20] drm/xe/madvise: Skip vma invalidation if mem attr are unchanged Himal Prasad Ghimiray
2025-06-23 22:28   ` Matthew Brost
2025-06-26  8:54     ` Ghimiray, Himal Prasad
2025-06-16  4:30 ` ✗ CI.checkpatch: warning for MADVISE FOR XE (rev3) Patchwork
2025-06-16  4:31 ` ✓ CI.KUnit: success " Patchwork
2025-06-16  4:45 ` ✗ CI.checksparse: warning " Patchwork
2025-06-16  5:13 ` ✓ Xe.CI.BAT: success " Patchwork
2025-06-16 15:06 ` ✗ Xe.CI.Full: failure " Patchwork
2025-07-29  4:41 ` [PATCH v4 00/20] MADVISE FOR XE Matthew Brost
2025-07-30 11:16   ` Ghimiray, Himal Prasad
