Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
To: Matthew Brost <matthew.brost@intel.com>
Cc: intel-xe@lists.freedesktop.org
Subject: Re: [Intel-xe] [PATCH v2 19/31] drm/xe: Reduce the number list links in xe_vma
Date: Mon, 8 May 2023 17:43:50 -0400	[thread overview]
Message-ID: <ZFltFm99mI7OcsCm@intel.com> (raw)
In-Reply-To: <20230502001727.3211096-20-matthew.brost@intel.com>

On Mon, May 01, 2023 at 05:17:15PM -0700, Matthew Brost wrote:
> 5 list links can be squashed into a union in xe_vma, as being on the
> various lists is mutually exclusive.
> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

> ---
>  drivers/gpu/drm/xe/xe_gt_pagefault.c |  2 +-
>  drivers/gpu/drm/xe/xe_pt.c           |  5 +-
>  drivers/gpu/drm/xe/xe_vm.c           | 29 ++++++------
>  drivers/gpu/drm/xe/xe_vm_types.h     | 71 +++++++++++++++-------------
>  4 files changed, 55 insertions(+), 52 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> index cfffe3398fe4..d7bf6b0a0697 100644
> --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> @@ -157,7 +157,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
>  
>  	if (xe_vma_is_userptr(vma) && write_locked) {
>  		spin_lock(&vm->userptr.invalidated_lock);
> -		list_del_init(&vma->userptr.invalidate_link);
> +		list_del_init(&vma->invalidate_link);
>  		spin_unlock(&vm->userptr.invalidated_lock);
>  
>  		ret = xe_vma_userptr_pin_pages(vma);
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index 010f44260cda..8eab8e1bbaf0 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -1116,8 +1116,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
>  
>  		vma->userptr.divisor = divisor << 1;
>  		spin_lock(&vm->userptr.invalidated_lock);
> -		list_move_tail(&vma->userptr.invalidate_link,
> -			       &vm->userptr.invalidated);
> +		list_move_tail(&vma->invalidate_link, &vm->userptr.invalidated);
>  		spin_unlock(&vm->userptr.invalidated_lock);
>  		return true;
>  	}
> @@ -1724,7 +1723,7 @@ __xe_pt_unbind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
>  
>  		if (!vma->gt_present) {
>  			spin_lock(&vm->userptr.invalidated_lock);
> -			list_del_init(&vma->userptr.invalidate_link);
> +			list_del_init(&vma->invalidate_link);
>  			spin_unlock(&vm->userptr.invalidated_lock);
>  		}
>  		up_read(&vm->userptr.notifier_lock);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index e0ed7201aeb0..e5f2fffb2aec 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -677,8 +677,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
>  	if (!xe_vm_in_fault_mode(vm) &&
>  	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->gt_present) {
>  		spin_lock(&vm->userptr.invalidated_lock);
> -		list_move_tail(&vma->userptr.invalidate_link,
> -			       &vm->userptr.invalidated);
> +		list_move_tail(&vma->invalidate_link, &vm->userptr.invalidated);
>  		spin_unlock(&vm->userptr.invalidated_lock);
>  	}
>  
> @@ -726,8 +725,8 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
>  	/* Collect invalidated userptrs */
>  	spin_lock(&vm->userptr.invalidated_lock);
>  	list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
> -				 userptr.invalidate_link) {
> -		list_del_init(&vma->userptr.invalidate_link);
> +				 invalidate_link) {
> +		list_del_init(&vma->invalidate_link);
>  		list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
>  	}
>  	spin_unlock(&vm->userptr.invalidated_lock);
> @@ -830,12 +829,11 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
>  		return vma;
>  	}
>  
> -	/* FIXME: Way to many lists, should be able to reduce this */
> +	/*
> +	 * userptr_link, destroy_link, notifier.rebind_link,
> +	 * invalidate_link
> +	 */
>  	INIT_LIST_HEAD(&vma->rebind_link);
> -	INIT_LIST_HEAD(&vma->unbind_link);
> -	INIT_LIST_HEAD(&vma->userptr_link);
> -	INIT_LIST_HEAD(&vma->userptr.invalidate_link);
> -	INIT_LIST_HEAD(&vma->notifier.rebind_link);
>  	INIT_LIST_HEAD(&vma->extobj.link);
>  
>  	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
> @@ -953,15 +951,14 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
>  	struct xe_vm *vm = xe_vma_vm(vma);
>  
>  	lockdep_assert_held_write(&vm->lock);
> -	XE_BUG_ON(!list_empty(&vma->unbind_link));
>  
>  	if (xe_vma_is_userptr(vma)) {
>  		XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED));
>  
>  		spin_lock(&vm->userptr.invalidated_lock);
> -		list_del_init(&vma->userptr.invalidate_link);
> +		if (!list_empty(&vma->invalidate_link))
> +			list_del_init(&vma->invalidate_link);
>  		spin_unlock(&vm->userptr.invalidated_lock);
> -		list_del(&vma->userptr_link);
>  	} else if (!xe_vma_is_null(vma)) {
>  		xe_bo_assert_held(xe_vma_bo(vma));
>  		drm_gpuva_unlink(&vma->gpuva);
> @@ -1328,7 +1325,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
>  			continue;
>  		}
>  
> -		list_add_tail(&vma->unbind_link, &contested);
> +		if (!list_empty(&vma->destroy_link))
> +			list_del_init(&vma->destroy_link);
> +		list_add_tail(&vma->destroy_link, &contested);
>  	}
>  
>  	/*
> @@ -1356,8 +1355,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
>  	 * Since we hold a refcount to the bo, we can remove and free
>  	 * the members safely without locking.
>  	 */
> -	list_for_each_entry_safe(vma, next_vma, &contested, unbind_link) {
> -		list_del_init(&vma->unbind_link);
> +	list_for_each_entry_safe(vma, next_vma, &contested, destroy_link) {
> +		list_del_init(&vma->destroy_link);
>  		xe_vma_destroy_unlocked(vma);
>  	}
>  
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index d55ec8156caa..22def5483c12 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -50,21 +50,32 @@ struct xe_vma {
>  	 */
>  	u64 gt_present;
>  
> -	/** @userptr_link: link into VM repin list if userptr */
> -	struct list_head userptr_link;
> +	union {
> +		/** @userptr_link: link into VM repin list if userptr */
> +		struct list_head userptr_link;
>  
> -	/**
> -	 * @rebind_link: link into VM if this VMA needs rebinding, and
> -	 * if it's a bo (not userptr) needs validation after a possible
> -	 * eviction. Protected by the vm's resv lock.
> -	 */
> -	struct list_head rebind_link;
> +		/**
> +		 * @rebind_link: link into VM if this VMA needs rebinding, and
> +		 * if it's a bo (not userptr) needs validation after a possible
> +		 * eviction. Protected by the vm's resv lock.
> +		 */
> +		struct list_head rebind_link;
>  
> -	/**
> -	 * @unbind_link: link or list head if an unbind of multiple VMAs, in
> -	 * single unbind op, is being done.
> -	 */
> -	struct list_head unbind_link;
> +		/** @destroy_link: link for contested VMAs on VM close */
> +		struct list_head destroy_link;
> +
> +		/** @invalidate_link: Link for the vm::userptr.invalidated list */
> +		struct list_head invalidate_link;
> +
> +		struct {
> +			 /*
> +			  * @notifier.rebind_link: link for
> +			  * vm->notifier.rebind_list, protected by
> +			  * vm->notifier.list_lock
> +			  */
> +			struct list_head rebind_link;
> +		} notifier;
> +	};
>  
>  	/** @destroy_cb: callback to destroy VMA when unbind job is done */
>  	struct dma_fence_cb destroy_cb;
> @@ -72,10 +83,22 @@ struct xe_vma {
>  	/** @destroy_work: worker to destroy this BO */
>  	struct work_struct destroy_work;
>  
> +	/** @usm: unified shared memory state */
> +	struct {
> +		/** @gt_invalidated: VMA has been invalidated */
> +		u64 gt_invalidated;
> +	} usm;
> +
> +	struct {
> +		/**
> +		 * @extobj.link: Link into vm's external object list.
> +		 * protected by the vm lock.
> +		 */
> +		struct list_head link;
> +	} extobj;
> +
>  	/** @userptr: user pointer state */
>  	struct {
> -		/** @invalidate_link: Link for the vm::userptr.invalidated list */
> -		struct list_head invalidate_link;
>  		/**
>  		 * @notifier: MMU notifier for user pointer (invalidation call back)
>  		 */
> @@ -96,24 +119,6 @@ struct xe_vma {
>  		u32 divisor;
>  #endif
>  	} userptr;
> -
> -	/** @usm: unified shared memory state */
> -	struct {
> -		/** @gt_invalidated: VMA has been invalidated */
> -		u64 gt_invalidated;
> -	} usm;
> -
> -	struct {
> -		struct list_head rebind_link;
> -	} notifier;
> -
> -	struct {
> -		/**
> -		 * @extobj.link: Link into vm's external object list.
> -		 * protected by the vm lock.
> -		 */
> -		struct list_head link;
> -	} extobj;
>  };
>  
>  struct xe_device;
> -- 
> 2.34.1
> 

  reply	other threads:[~2023-05-08 21:44 UTC|newest]

Thread overview: 126+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-02  0:16 [Intel-xe] [PATCH v2 00/31] Upstreaming prep / all of mbrosts patches Matthew Brost
2023-05-02  0:16 ` [Intel-xe] [PATCH v2 01/31] drm/sched: Add run_wq argument to drm_sched_init Matthew Brost
2023-05-03 12:03   ` Thomas Hellström
2023-05-03 15:06     ` Matthew Brost
2023-05-05 18:24       ` Rodrigo Vivi
2023-05-02  0:16 ` [Intel-xe] [PATCH v2 02/31] drm/sched: Move schedule policy to scheduler Matthew Brost
2023-05-03 12:13   ` Thomas Hellström
2023-05-03 15:11     ` Matthew Brost
2023-05-02  0:16 ` [Intel-xe] [PATCH v2 03/31] drm/sched: Add DRM_SCHED_POLICY_SINGLE_ENTITY scheduling policy Matthew Brost
2023-05-08 12:40   ` Thomas Hellström
2023-05-22  1:16     ` Matthew Brost
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 04/31] drm/xe: Use DRM_SCHED_POLICY_SINGLE_ENTITY mode Matthew Brost
2023-05-08 12:41   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 05/31] drm/xe: Long running job update Matthew Brost
2023-05-05 18:36   ` Rodrigo Vivi
2023-05-08  1:14     ` Matthew Brost
2023-05-08 13:14   ` Thomas Hellström
2023-05-09 14:56     ` Matthew Brost
2023-05-09 15:21       ` Thomas Hellström
2023-05-09 22:16         ` Matthew Brost
2023-05-10  8:15           ` Thomas Hellström
2023-05-09 22:21     ` Matthew Brost
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 06/31] drm/xe: Ensure LR engines are not persistent Matthew Brost
2023-05-05 18:38   ` Rodrigo Vivi
2023-05-08  1:03     ` Matthew Brost
2023-05-09 12:21   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 07/31] drm/xe: Only try to lock external BOs in VM bind Matthew Brost
2023-05-05 18:40   ` Rodrigo Vivi
2023-05-08  1:08     ` Matthew Brost
2023-05-08  1:15       ` Christopher Snowhill
2023-05-08 21:34       ` Rodrigo Vivi
2023-05-09 12:29         ` Thomas Hellström
2023-05-10 23:25           ` Matthew Brost
2023-05-11  7:43             ` Thomas Hellström
2023-05-08  1:17   ` Christopher Snowhill
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 08/31] drm/xe: VM LRU bulk move Matthew Brost
2023-05-08 21:39   ` Rodrigo Vivi
2023-05-09 22:09     ` Matthew Brost
2023-05-10  1:37       ` Rodrigo Vivi
2023-05-09 12:47   ` Thomas Hellström
2023-05-09 22:05     ` Matthew Brost
2023-05-10  8:14       ` Thomas Hellström
2023-05-10 18:40         ` Matthew Brost
2023-05-11  7:24           ` Thomas Hellström
2023-05-11 14:11             ` Matthew Brost
2023-05-12  9:03               ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 09/31] drm/xe/guc: Read HXG fields from DW1 of G2H response Matthew Brost
2023-05-05 18:50   ` Rodrigo Vivi
2023-05-09 12:49   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 10/31] drm/xe/guc: Return the lower part of blocking H2G message Matthew Brost
2023-05-05 18:52   ` Rodrigo Vivi
2023-05-08  1:10     ` Matthew Brost
2023-05-08  9:20       ` Michal Wajdeczko
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 11/31] drm/xe/guc: Use doorbells for submission if possible Matthew Brost
2023-05-08 21:42   ` Rodrigo Vivi
2023-05-10  0:49     ` Matthew Brost
2023-05-09 13:00   ` Thomas Hellström
2023-05-10  0:51     ` Matthew Brost
2023-05-21 12:32   ` Oded Gabbay
2023-06-08 19:30     ` Matthew Brost
2023-06-12 13:01       ` Oded Gabbay
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 12/31] drm/xe/guc: Print doorbell ID in GuC engine debugfs entry Matthew Brost
2023-05-05 18:55   ` Rodrigo Vivi
2023-05-09 13:01     ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 13/31] maple_tree: split up MA_STATE() macro Matthew Brost
2023-05-09 13:21   ` Thomas Hellström
2023-05-10  0:29     ` Matthew Brost
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 14/31] maple_tree: Export mas_preallocate Matthew Brost
2023-05-09 13:33   ` Thomas Hellström
2023-05-10  0:31     ` Matthew Brost
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 15/31] drm: manager to keep track of GPUs VA mappings Matthew Brost
2023-05-09 13:49   ` Thomas Hellström
2023-05-10  0:55     ` Matthew Brost
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 16/31] drm/xe: Port Xe to GPUVA Matthew Brost
2023-05-09 13:52   ` Thomas Hellström
2023-05-11  2:41     ` Matthew Brost
2023-05-11  7:39       ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 17/31] drm/xe: NULL binding implementation Matthew Brost
2023-05-09 14:34   ` Rodrigo Vivi
2023-05-11  2:52     ` Matthew Brost
2023-05-09 15:17   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 18/31] drm/xe: Avoid doing rebinds Matthew Brost
2023-05-09 14:48   ` Rodrigo Vivi
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 19/31] drm/xe: Reduce the number list links in xe_vma Matthew Brost
2023-05-08 21:43   ` Rodrigo Vivi [this message]
2023-05-11  8:38   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 20/31] drm/xe: Optimize size of xe_vma allocation Matthew Brost
2023-05-05 19:37   ` Rodrigo Vivi
2023-05-08  1:21     ` Matthew Brost
2023-05-11  9:05   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 21/31] drm/gpuva: Add drm device to GPUVA manager Matthew Brost
2023-05-05 19:39   ` Rodrigo Vivi
2023-05-11  9:06     ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 22/31] drm/gpuva: Move dma-resv " Matthew Brost
2023-05-11  9:10   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 23/31] drm/gpuva: Add support for extobj Matthew Brost
2023-05-11  9:35   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 24/31] drm/xe: Userptr refactor Matthew Brost
2023-05-05 19:41   ` Rodrigo Vivi
2023-05-11  9:46   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 25/31] drm: execution context for GEM buffers v3 Matthew Brost
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 26/31] drm/exec: Always compile drm_exec Matthew Brost
2023-05-09 14:45   ` Rodrigo Vivi
2023-05-10  0:37     ` Matthew Brost
2023-05-10  0:38     ` Matthew Brost
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 27/31] drm/xe: Use drm_exec for locking rather than TTM exec helpers Matthew Brost
2023-05-05 19:42   ` Rodrigo Vivi
2023-05-11 10:01   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 28/31] drm/xe: Allow dma-fences as in-syncs for compute / faulting VM Matthew Brost
2023-05-05 19:43   ` Rodrigo Vivi
2023-05-08  1:19     ` Matthew Brost
2023-05-08 21:29       ` Rodrigo Vivi
2023-05-11 10:03   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 29/31] drm/xe: Allow compute VMs to output dma-fences on binds Matthew Brost
2023-05-09 14:50   ` Rodrigo Vivi
2023-05-11 10:04   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 30/31] drm/xe: remove async worker, sync binds, new error handling Matthew Brost
2023-05-17 16:53   ` Thomas Hellström
2023-05-02  0:17 ` [Intel-xe] [PATCH v2 31/31] drm/xe/uapi: Add some VM bind kernel doc Matthew Brost
2023-05-05 19:45   ` Rodrigo Vivi
2023-05-11 10:14     ` Thomas Hellström
2023-05-02  0:20 ` [Intel-xe] ✗ CI.Patch_applied: failure for Upstreaming prep / all of mbrosts patches (rev2) Patchwork
2023-05-02  1:54   ` Christopher Snowhill (kode54)
2023-05-02  1:59   ` Christopher Snowhill (kode54)
2023-05-03 12:37 ` [Intel-xe] [PATCH v2 00/31] Upstreaming prep / all of mbrosts patches Thomas Hellström
2023-05-03 15:27   ` Matthew Brost

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZFltFm99mI7OcsCm@intel.com \
    --to=rodrigo.vivi@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.brost@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox