From: "Zanoni, Paulo R" <paulo.r.zanoni@intel.com>
To: "intel-xe@lists.freedesktop.org" <intel-xe@lists.freedesktop.org>,
"Brost, Matthew" <matthew.brost@intel.com>
Cc: "thomas.hellstrom@linux.intel.com" <thomas.hellstrom@linux.intel.com>
Subject: Re: [PATCH] drm/xe: Don't allow evicting of BOs in same VM in array of VM binds
Date: Wed, 8 Oct 2025 22:52:22 +0000
Message-ID: <5f3936a32079971fa490b1dccfd69d2558df7aaf.camel@intel.com>
In-Reply-To: <20251008200051.3423684-1-matthew.brost@intel.com>
On Wed, 2025-10-08 at 13:00 -0700, Matthew Brost wrote:
> An array of VM binds can potentially evict other buffer objects (BOs)
> within the same VM under certain conditions, which may lead to NULL
> pointer dereferences later in the bind pipeline. To prevent this, clear
> the allow_res_evict flag in the xe_bo_validate call.
>
> Cc: stable@vger.kernel.org
> Reported-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
> Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6268

I can confirm it fixes the issue for me. Thanks!!

I only did some quick testing, but everything still seems to work, so
feel free to add:

Tested-by: Paulo Zanoni <paulo.r.zanoni@intel.com>

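For anyone following along: my reading of the change is that the
allow_res_evict argument passed to xe_bo_validate() now also requires
the bind not to be part of a multi-bind array, on top of the existing
preempt-fence-mode check. A minimal stand-alone sketch of that
decision (plain userspace C, names borrowed from the patch, not
actual kernel code):

  #include <stdbool.h>
  #include <stdio.h>

  /* Mirrors the allow_res_evict value computed in vma_lock_and_validate()
   * after this patch: evicting other BOs of the same VM is only allowed
   * outside preempt-fence mode and outside an array of VM binds. */
  static bool allow_res_evict(bool preempt_fence_mode, bool array_of_binds)
  {
          return !preempt_fence_mode && !array_of_binds;
  }

  int main(void)
  {
          printf("single bind:    %d\n", allow_res_evict(false, false)); /* 1 */
          printf("array of binds: %d\n", allow_res_evict(false, true));  /* 0 */
          return 0;
  }
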
> Fixes: 774b5fa509a9 ("drm/xe: Avoid evicting object of the same vm in none fault mode")
> Fixes: 77f2ef3f16f5 ("drm/xe: Lock all gpuva ops during VM bind IOCTL")
> Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c       | 25 ++++++++++++++++---------
>  drivers/gpu/drm/xe/xe_vm_types.h |  1 +
>  2 files changed, 17 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 4e914928e0a9..468176cf901e 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2834,7 +2834,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
>  }
>
>  static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
> -                                 bool validate)
> +                                 bool no_res_evict, bool validate)
>  {
>          struct xe_bo *bo = xe_vma_bo(vma);
>          struct xe_vm *vm = xe_vma_vm(vma);
> @@ -2845,7 +2845,8 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
>                          err = drm_exec_lock_obj(exec, &bo->ttm.base);
>                  if (!err && validate)
>                          err = xe_bo_validate(bo, vm,
> -                                             !xe_vm_in_preempt_fence_mode(vm), exec);
> +                                             !xe_vm_in_preempt_fence_mode(vm) &&
> +                                             !no_res_evict, exec);
>          }
>
>          return err;
> @@ -2915,14 +2916,16 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>  }
>
>  static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> -                            struct xe_vma_op *op)
> +                            struct xe_vma_ops *vops, struct xe_vma_op *op)
>  {
>          int err = 0;
> +        bool no_res_evict = vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS;
>
>          switch (op->base.op) {
>          case DRM_GPUVA_OP_MAP:
>                  if (!op->map.invalidate_on_bind)
>                          err = vma_lock_and_validate(exec, op->map.vma,
> +                                                    no_res_evict,
>                                                      !xe_vm_in_fault_mode(vm) ||
>                                                      op->map.immediate);
>                  break;
> @@ -2933,11 +2936,13 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
>
>                  err = vma_lock_and_validate(exec,
>                                              gpuva_to_vma(op->base.remap.unmap->va),
> -                                            false);
> +                                            no_res_evict, false);
>                  if (!err && op->remap.prev)
> -                        err = vma_lock_and_validate(exec, op->remap.prev, true);
> +                        err = vma_lock_and_validate(exec, op->remap.prev,
> +                                                    no_res_evict, true);
>                  if (!err && op->remap.next)
> -                        err = vma_lock_and_validate(exec, op->remap.next, true);
> +                        err = vma_lock_and_validate(exec, op->remap.next,
> +                                                    no_res_evict, true);
>                  break;
>          case DRM_GPUVA_OP_UNMAP:
>                  err = check_ufence(gpuva_to_vma(op->base.unmap.va));
> @@ -2946,7 +2951,7 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
>
>                  err = vma_lock_and_validate(exec,
>                                              gpuva_to_vma(op->base.unmap.va),
> -                                            false);
> +                                            no_res_evict, false);
>                  break;
>          case DRM_GPUVA_OP_PREFETCH:
>          {
> @@ -2961,7 +2966,7 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
>
>                  err = vma_lock_and_validate(exec,
>                                              gpuva_to_vma(op->base.prefetch.va),
> -                                            false);
> +                                            no_res_evict, false);
>                  if (!err && !xe_vma_has_no_bo(vma))
>                          err = xe_bo_migrate(xe_vma_bo(vma),
>                                              region_to_mem_type[region],
> @@ -3007,7 +3012,7 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
>                  return err;
>
>          list_for_each_entry(op, &vops->list, link) {
> -                err = op_lock_and_prep(exec, vm, op);
> +                err = op_lock_and_prep(exec, vm, vops, op);
>                  if (err)
>                          return err;
>          }
> @@ -3640,6 +3645,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>          }
>
>          xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
> +        if (args->num_binds > 1)
> +                vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS;
>          for (i = 0; i < args->num_binds; ++i) {
>                  u64 range = bind_ops[i].range;
>                  u64 addr = bind_ops[i].addr;
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index da39940501d8..413353e1c225 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -476,6 +476,7 @@ struct xe_vma_ops {
>          /** @flag: signify the properties within xe_vma_ops*/
>  #define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
>  #define XE_VMA_OPS_FLAG_MADVISE BIT(1)
> +#define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2)
>          u32 flags;
>  #ifdef TEST_VM_OPS_ERROR
>          /** @inject_error: inject error to test error handling */