All of lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Brost <matthew.brost@intel.com>
To: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Cc: intel-xe@lists.freedesktop.org
Subject: Re: [Intel-xe] [PATCH v3 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec
Date: Thu, 31 Aug 2023 18:07:59 +0000	[thread overview]
Message-ID: <ZPDW/59htJrQx7Ap@DUT025-TGLU.fm.intel.com> (raw)
In-Reply-To: <20230831092937.2197-7-thomas.hellstrom@linux.intel.com>

On Thu, Aug 31, 2023 at 11:29:37AM +0200, Thomas Hellström wrote:
> The VM_BIND functionality and vma destruction was locking
> potentially multiple dma_resv objects using the
> ttm_eu_reserve_buffers() function. Rework those to use the drm_exec
> helper, taking care that any calls to xe_bo_validate() ends up
> inside an unsealed locking transaction.
> 
> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c | 91 +++++++++++++++-----------------------
>  1 file changed, 36 insertions(+), 55 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 1547467c7d92..defa4299f172 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1102,27 +1102,21 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
>  
>  static void xe_vma_destroy_unlocked(struct xe_vma *vma)
>  {
> -	struct ttm_validate_buffer tv[2];
> -	struct ww_acquire_ctx ww;
>  	struct xe_bo *bo = xe_vma_bo(vma);
> -	LIST_HEAD(objs);
> -	LIST_HEAD(dups);
> +	struct drm_exec exec;
>  	int err;
>  
> -	memset(tv, 0, sizeof(tv));
> -	tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
> -	list_add(&tv[0].head, &objs);
> -
> -	if (bo) {
> -		tv[1].bo = &xe_bo_get(bo)->ttm;

You catch this below, but here is the get that you delete.

> -		list_add(&tv[1].head, &objs);
> +	drm_exec_init(&exec, 0);
> +	drm_exec_until_all_locked(&exec) {
> +		err = xe_vm_prepare_vma(&exec, vma, 0);
> +		drm_exec_retry_on_contention(&exec);
> +		if (XE_WARN_ON(err))
> +			break;
>  	}
> -	err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
> -	XE_WARN_ON(err);
>  
>  	xe_vma_destroy(vma, NULL);
>  
> -	ttm_eu_backoff_reservation(&ww, &objs);
> +	drm_exec_fini(&exec);
>  	if (bo)
>  		xe_bo_put(bo);

So the put here needs to be dropped. Drm exec has the proper gets / puts
in place.

>  }
> @@ -2138,12 +2132,6 @@ struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
>  	return &vm->pt_root[idx]->bo->ttm;
>  }
>  
> -static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
> -{
> -	tv->num_shared = 1;
> -	tv->bo = xe_vm_ttm_bo(vm);
> -}
> -
>  static void vm_set_async_error(struct xe_vm *vm, int err)
>  {
>  	lockdep_assert_held(&vm->lock);
> @@ -2644,42 +2632,16 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
>  	return err;
>  }
>  
> -static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> -			       struct xe_vma_op *op)
> +static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
> +		      struct xe_vma *vma, struct xe_vma_op *op)
>  {
> -	LIST_HEAD(objs);
> -	LIST_HEAD(dups);
> -	struct ttm_validate_buffer tv_bo, tv_vm;
> -	struct ww_acquire_ctx ww;
> -	struct xe_bo *vbo;
>  	int err;
>  
>  	lockdep_assert_held_write(&vm->lock);
>  
> -	xe_vm_tv_populate(vm, &tv_vm);
> -	list_add_tail(&tv_vm.head, &objs);
> -	vbo = xe_vma_bo(vma);
> -	if (vbo) {
> -		/*
> -		 * An unbind can drop the last reference to the BO and
> -		 * the BO is needed for ttm_eu_backoff_reservation so
> -		 * take a reference here.
> -		 */
> -		xe_bo_get(vbo);
> -
> -		if (!vbo->vm) {
> -			tv_bo.bo = &vbo->ttm;
> -			tv_bo.num_shared = 1;
> -			list_add(&tv_bo.head, &objs);
> -		}
> -	}
> -
> -again:
> -	err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
> -	if (err) {
> -		xe_bo_put(vbo);
> +	err = xe_vm_prepare_vma(exec, vma, 1);
> +	if (err)
>  		return err;
> -	}
>  
>  	xe_vm_assert_held(vm);
>  	xe_bo_assert_held(xe_vma_bo(vma));
> @@ -2758,17 +2720,36 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
>  		XE_WARN_ON("NOT POSSIBLE");
>  	}
>  
> -	ttm_eu_backoff_reservation(&ww, &objs);
> +	if (err)
> +		trace_xe_vma_fail(vma);
> +
> +	return err;
> +}
> +
> +static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> +			       struct xe_vma_op *op)
> +{
> +	struct drm_exec exec;
> +	int err;
> +
> +retry_userptr:
> +	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
> +	drm_exec_until_all_locked(&exec) {
> +		err = op_execute(&exec, vm, vma, op);
> +		drm_exec_retry_on_contention(&exec);
> +		if (err)
> +			break;
> +	}
> +	drm_exec_fini(&exec);
> +
>  	if (err == -EAGAIN && xe_vma_is_userptr(vma)) {

Not related to this patch but I think we can drop the userptr retry as
the exec loop or preempt rebind worker should catch this. It is redundant
to have it here.

Matt

>  		lockdep_assert_held_write(&vm->lock);
>  		err = xe_vma_userptr_pin_pages(vma);
>  		if (!err)
> -			goto again;
> -	}
> -	xe_bo_put(vbo);
> +			goto retry_userptr;
>  
> -	if (err)
>  		trace_xe_vma_fail(vma);
> +	}
>  
>  	return err;
>  }
> -- 
> 2.41.0
> 

  parent reply	other threads:[~2023-08-31 18:09 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-31  9:29 [Intel-xe] [PATCH v3 0/6] drm/xe: Convert to drm_exec Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 1/6] drm/xe/bo: Simplify xe_bo_lock() Thomas Hellström
2023-08-31 14:43   ` Thomas Hellström
2023-08-31 17:01     ` Matthew Brost
2023-08-31 17:48       ` Thomas Hellström
2023-08-31 18:33         ` Matthew Brost
2023-09-01 11:59           ` Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 2/6] drm/xe/vm: Simplify and document xe_vm_lock() Thomas Hellström
2023-08-31 17:06   ` Matthew Brost
2023-08-31 17:49     ` Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 3/6] drm/xe/bo: Remove the lock_no_vm()/unlock_no_vm() interface Thomas Hellström
2023-08-31 17:10   ` Matthew Brost
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 4/6] drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper Thomas Hellström
2023-08-31 17:51   ` Matthew Brost
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 5/6] drm/xe: Convert pagefaulting code to use drm_exec Thomas Hellström
2023-08-31 17:58   ` Matthew Brost
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec Thomas Hellström
2023-08-31 14:42   ` Thomas Hellström
2023-08-31 18:07   ` Matthew Brost [this message]
2023-08-31 10:40 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Convert to drm_exec (rev2) Patchwork
2023-08-31 10:41 ` [Intel-xe] ✗ CI.checkpatch: warning " Patchwork
2023-08-31 10:42 ` [Intel-xe] ✓ CI.KUnit: success " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✗ CI.Hooks: failure " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✗ CI.checksparse: warning " Patchwork
2023-08-31 11:20 ` [Intel-xe] ✗ CI.BAT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZPDW/59htJrQx7Ap@DUT025-TGLU.fm.intel.com \
    --to=matthew.brost@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.