Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Brost <matthew.brost@intel.com>
To: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Cc: intel-xe@lists.freedesktop.org
Subject: Re: [Intel-xe] [PATCH v3 3/6] drm/xe/bo: Remove the lock_no_vm()/unlock_no_vm() interface
Date: Thu, 31 Aug 2023 17:10:32 +0000	[thread overview]
Message-ID: <ZPDJiLBGL+qEK/Hy@DUT025-TGLU.fm.intel.com> (raw)
In-Reply-To: <20230831092937.2197-4-thomas.hellstrom@linux.intel.com>

On Thu, Aug 31, 2023 at 11:29:34AM +0200, Thomas Hellström wrote:
> Apart from asserts, it's essentially the same as
> xe_bo_lock()/xe_bo_unlock(), and the usage intentions of this interface
> were unclear. Remove it.
> 
> v2:
> - Update the xe_display subsystem as well.
> 
> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

Reviewed-by: Matthew Brost <matthew.brost@intel.com>

> ---
>  drivers/gpu/drm/i915/display/intel_fb.c |  4 ++--
>  drivers/gpu/drm/xe/tests/xe_bo.c        |  2 +-
>  drivers/gpu/drm/xe/tests/xe_dma_buf.c   |  4 ++--
>  drivers/gpu/drm/xe/tests/xe_migrate.c   |  2 +-
>  drivers/gpu/drm/xe/xe_bo.h              | 23 ++---------------------
>  drivers/gpu/drm/xe/xe_dma_buf.c         |  5 +++--
>  drivers/gpu/drm/xe/xe_lrc.c             | 10 ++--------
>  7 files changed, 13 insertions(+), 37 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
> index e0bac4cf3f4b..f5a96b94cfba 100644
> --- a/drivers/gpu/drm/i915/display/intel_fb.c
> +++ b/drivers/gpu/drm/i915/display/intel_fb.c
> @@ -1892,9 +1892,9 @@ static void intel_user_framebuffer_destroy_vm(struct drm_framebuffer *fb)
>  		struct xe_bo *bo = intel_fb_obj(fb);
>  
>  		/* Unpin our kernel fb first */
> -		xe_bo_lock_no_vm(bo, NULL);
> +		xe_bo_lock(bo, false);
>  		xe_bo_unpin(bo);
> -		xe_bo_unlock_no_vm(bo);
> +		xe_bo_unlock(bo);
>  	}
>  	xe_bo_put(intel_fb_obj(fb));
>  #endif
> diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
> index c6025404042d..acc5ad01baaf 100644
> --- a/drivers/gpu/drm/xe/tests/xe_bo.c
> +++ b/drivers/gpu/drm/xe/tests/xe_bo.c
> @@ -143,7 +143,7 @@ static void ccs_test_run_gt(struct xe_device *xe, struct xe_gt *gt,
>  	ret = ccs_test_migrate(gt, bo, true, 0ULL, 0ULL, test);
>  
>  out_unlock:
> -	xe_bo_unlock_no_vm(bo);
> +	xe_bo_unlock(bo);
>  	xe_bo_put(bo);
>  }
>  
> diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
> index 513a3b3362e9..1c3f4bc72b99 100644
> --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
> +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
> @@ -148,14 +148,14 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
>  			int err;
>  
>  			/* Is everything where we expect it to be? */
> -			xe_bo_lock_no_vm(import_bo, NULL);
> +			xe_bo_lock(import_bo, false);
>  			err = xe_bo_validate(import_bo, NULL, false);
>  			if (err && err != -EINTR && err != -ERESTARTSYS)
>  				KUNIT_FAIL(test,
>  					   "xe_bo_validate() failed with err=%d\n", err);
>  
>  			check_residency(test, bo, import_bo, dmabuf);
> -			xe_bo_unlock_no_vm(import_bo);
> +			xe_bo_unlock(import_bo);
>  		}
>  		drm_gem_object_put(import);
>  	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
> diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
> index 8bb081086ca2..f58cd1da1a34 100644
> --- a/drivers/gpu/drm/xe/tests/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
> @@ -183,7 +183,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
>  
>  	xe_bo_vunmap(sysmem);
>  out_unlock:
> -	xe_bo_unlock_no_vm(sysmem);
> +	xe_bo_unlock(sysmem);
>  	xe_bo_put(sysmem);
>  }
>  
> diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
> index a7b9e7084225..9097bcc13209 100644
> --- a/drivers/gpu/drm/xe/xe_bo.h
> +++ b/drivers/gpu/drm/xe/xe_bo.h
> @@ -164,25 +164,6 @@ static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
>  	}
>  }
>  
> -static inline void xe_bo_lock_no_vm(struct xe_bo *bo,
> -				    struct ww_acquire_ctx *ctx)
> -{
> -	if (bo) {
> -		XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
> -				      bo->ttm.base.resv != &bo->ttm.base._resv));
> -		dma_resv_lock(bo->ttm.base.resv, ctx);
> -	}
> -}
> -
> -static inline void xe_bo_unlock_no_vm(struct xe_bo *bo)
> -{
> -	if (bo) {
> -		XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
> -				      bo->ttm.base.resv != &bo->ttm.base._resv));
> -		dma_resv_unlock(bo->ttm.base.resv);
> -	}
> -}
> -
>  int xe_bo_pin_external(struct xe_bo *bo);
>  int xe_bo_pin(struct xe_bo *bo);
>  void xe_bo_unpin_external(struct xe_bo *bo);
> @@ -197,9 +178,9 @@ static inline bool xe_bo_is_pinned(struct xe_bo *bo)
>  static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
>  {
>  	if (likely(bo)) {
> -		xe_bo_lock_no_vm(bo, NULL);
> +		xe_bo_lock(bo, false);
>  		xe_bo_unpin(bo);
> -		xe_bo_unlock_no_vm(bo);
> +		xe_bo_unlock(bo);
>  
>  		xe_bo_put(bo);
>  	}
> diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
> index 975dee1f770f..09343b8b3e96 100644
> --- a/drivers/gpu/drm/xe/xe_dma_buf.c
> +++ b/drivers/gpu/drm/xe/xe_dma_buf.c
> @@ -153,9 +153,10 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
>  	if (!reads)
>  		return 0;
>  
> -	xe_bo_lock_no_vm(bo, NULL);
> +	/* Can we do interruptible lock here? */
> +	xe_bo_lock(bo, false);
>  	(void)xe_bo_migrate(bo, XE_PL_TT);
> -	xe_bo_unlock_no_vm(bo);
> +	xe_bo_unlock(bo);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
> index 434fbb364b4b..6f899b6a4877 100644
> --- a/drivers/gpu/drm/xe/xe_lrc.c
> +++ b/drivers/gpu/drm/xe/xe_lrc.c
> @@ -790,15 +790,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
>  void xe_lrc_finish(struct xe_lrc *lrc)
>  {
>  	xe_hw_fence_ctx_finish(&lrc->fence_ctx);
> -	if (lrc->bo->vm)
> -		xe_vm_lock(lrc->bo->vm, false);
> -	else
> -		xe_bo_lock_no_vm(lrc->bo, NULL);
> +	xe_bo_lock(lrc->bo, false);
>  	xe_bo_unpin(lrc->bo);
> -	if (lrc->bo->vm)
> -		xe_vm_unlock(lrc->bo->vm);
> -	else
> -		xe_bo_unlock_no_vm(lrc->bo);
> +	xe_bo_unlock(lrc->bo);
>  	xe_bo_put(lrc->bo);
>  }
>  
> -- 
> 2.41.0
> 

  reply	other threads:[~2023-08-31 17:12 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-31  9:29 [Intel-xe] [PATCH v3 0/6] drm/xe: Convert to drm_exec Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 1/6] drm/xe/bo: Simplify xe_bo_lock() Thomas Hellström
2023-08-31 14:43   ` Thomas Hellström
2023-08-31 17:01     ` Matthew Brost
2023-08-31 17:48       ` Thomas Hellström
2023-08-31 18:33         ` Matthew Brost
2023-09-01 11:59           ` Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 2/6] drm/xe/vm: Simplify and document xe_vm_lock() Thomas Hellström
2023-08-31 17:06   ` Matthew Brost
2023-08-31 17:49     ` Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 3/6] drm/xe/bo: Remove the lock_no_vm()/unlock_no_vm() interface Thomas Hellström
2023-08-31 17:10   ` Matthew Brost [this message]
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 4/6] drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper Thomas Hellström
2023-08-31 17:51   ` Matthew Brost
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 5/6] drm/xe: Convert pagefaulting code to use drm_exec Thomas Hellström
2023-08-31 17:58   ` Matthew Brost
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec Thomas Hellström
2023-08-31 14:42   ` Thomas Hellström
2023-08-31 18:07   ` Matthew Brost
2023-08-31 10:40 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Convert to drm_exec (rev2) Patchwork
2023-08-31 10:41 ` [Intel-xe] ✗ CI.checkpatch: warning " Patchwork
2023-08-31 10:42 ` [Intel-xe] ✓ CI.KUnit: success " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✗ CI.Hooks: failure " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✗ CI.checksparse: warning " Patchwork
2023-08-31 11:20 ` [Intel-xe] ✗ CI.BAT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZPDJiLBGL+qEK/Hy@DUT025-TGLU.fm.intel.com \
    --to=matthew.brost@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox