From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: <intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
Date: Wed, 28 May 2025 16:12:33 -0700
Message-ID: <aDeYYU+vwvc1fzB1@lstrano-desk.jf.intel.com>
In-Reply-To: <20250527164003.1068118-8-himal.prasad.ghimiray@intel.com>
On Tue, May 27, 2025 at 10:09:51PM +0530, Himal Prasad Ghimiray wrote:
> Introduce xe_vm_range_tilemask_tlb_invalidation(), which issues a TLB
> invalidation for a specified address range across GTs indicated by a
> tilemask.
>
> Suggested-by: Matthew Brost <matthew.brost@intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
A couple of nits, but feel free to post a follow-up as an independent
patch to merge ahead of madvise.
> ---
> drivers/gpu/drm/xe/xe_svm.c | 43 +--------------
> drivers/gpu/drm/xe/xe_vm.c | 103 ++++++++++++++++++++++++------------
> drivers/gpu/drm/xe/xe_vm.h | 3 ++
> 3 files changed, 75 insertions(+), 74 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 871ac81bb04a..59e73187114d 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -167,14 +167,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
> {
> struct xe_vm *vm = gpusvm_to_vm(gpusvm);
> struct xe_device *xe = vm->xe;
> - struct xe_tile *tile;
> struct drm_gpusvm_range *r, *first;
> - struct xe_gt_tlb_invalidation_fence
> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
> u8 tile_mask = 0;
> - u8 id;
> - u32 fence_id = 0;
> long err;
>
> xe_svm_assert_in_notifier(vm);
> @@ -220,42 +215,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
>
> xe_device_wmb(xe);
>
> - for_each_tile(tile, xe, id) {
> - if (tile_mask & BIT(id)) {
> - int err;
> -
> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> - &fence[fence_id], true);
> -
> - err = xe_gt_tlb_invalidation_range(tile->primary_gt,
> - &fence[fence_id],
> - adj_start,
> - adj_end,
> - vm->usm.asid);
> - if (WARN_ON_ONCE(err < 0))
> - goto wait;
> - ++fence_id;
> -
> - if (!tile->media_gt)
> - continue;
> -
> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> - &fence[fence_id], true);
> -
> - err = xe_gt_tlb_invalidation_range(tile->media_gt,
> - &fence[fence_id],
> - adj_start,
> - adj_end,
> - vm->usm.asid);
> - if (WARN_ON_ONCE(err < 0))
> - goto wait;
> - ++fence_id;
> - }
> - }
> -
> -wait:
> - for (id = 0; id < fence_id; ++id)
> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> + err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
> + XE_WARN_ON(err);
Nit: use WARN_ON_ONCE here rather than XE_WARN_ON, matching the code
this replaces.
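i.e., an untested sketch of what I mean, keeping the once-only warn
semantics the old loop had:

	err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start,
						    adj_end, tile_mask);
	WARN_ON_ONCE(err);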
>
> range_notifier_event_end:
> r = first;
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index de6ecff237a6..d60b711e97e9 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -3851,6 +3851,68 @@ void xe_vm_unlock(struct xe_vm *vm)
> dma_resv_unlock(xe_vm_resv(vm));
> }
>
> +/**
> + * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
> + * address range
> + * @vm: The VM
> + * @start: start address
> + * @end: end address
> + * @tile_mask: mask for which gt's issue tlb invalidation
> + *
> + * Issue a range based TLB invalidation for gt's in tilemask
> + *
> + * Returns 0 for success, negative error code otherwise.
> + */
> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
> + u64 end, u8 tile_mask)
> +{
> + struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> + struct xe_tile *tile;
> + u32 fence_id = 0;
> + u8 id;
> + int err;
> +
> + if (!tile_mask)
> + return 0;
> +
> + for_each_tile(tile, vm->xe, id) {
> + if (tile_mask & BIT(id)) {
> + xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> + &fence[fence_id], true);
> +
> + err = xe_gt_tlb_invalidation_range(tile->primary_gt,
> + &fence[fence_id],
> + start,
> + end,
> + vm->usm.asid);
> + if (WARN_ON_ONCE(err < 0))
> + goto wait;
Let's just have the WARN_ON_ONCE in the SVM code at the caller - that is
the place where we can't really fail, so it warrants the warn.
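i.e., an untested sketch of what I have in mind for the helper - just
propagate the error:

	err = xe_gt_tlb_invalidation_range(tile->primary_gt,
					   &fence[fence_id],
					   start, end, vm->usm.asid);
	if (err < 0)
		goto wait;

and then the SVM caller does the WARN_ON_ONCE on the helper's return
value, since that is the path that can't meaningfully recover.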
> + ++fence_id;
> +
> + if (!tile->media_gt)
> + continue;
> +
> + xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> + &fence[fence_id], true);
> +
> + err = xe_gt_tlb_invalidation_range(tile->media_gt,
> + &fence[fence_id],
> + start,
> + end,
> + vm->usm.asid);
> + if (WARN_ON_ONCE(err < 0))
> + goto wait;
> + ++fence_id;
> + }
> + }
> +
> +wait:
> + for (id = 0; id < fence_id; ++id)
> + xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> +
> + return err;
> +}
> +
> /**
> * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
> * @vma: VMA to invalidate
> @@ -3865,11 +3927,9 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> {
> struct xe_device *xe = xe_vma_vm(vma)->xe;
> struct xe_tile *tile;
> - struct xe_gt_tlb_invalidation_fence
> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> - u8 id;
> - u32 fence_id = 0;
> + u8 tile_mask = 0;
> int ret = 0;
> + u8 id;
>
> xe_assert(xe, !xe_vma_is_null(vma));
> xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
> @@ -3893,37 +3953,14 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> }
> }
>
> - for_each_tile(tile, xe, id) {
> - if (xe_pt_zap_ptes(tile, vma)) {
> - xe_device_wmb(xe);
> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> - &fence[fence_id],
> - true);
> -
> - ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
> - &fence[fence_id], vma);
You can delete xe_gt_tlb_invalidation_vma now, as this was its only
caller.
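i.e., drop the function body along with its declaration - assuming the
declaration still lives in xe_gt_tlb_invalidation.h, something like:

	-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
	-			       struct xe_gt_tlb_invalidation_fence *fence,
	-			       struct xe_vma *vma);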
Matt
> - if (ret)
> - goto wait;
> - ++fence_id;
> + for_each_tile(tile, xe, id)
> + if (xe_pt_zap_ptes(tile, vma))
> + tile_mask |= BIT(id);
>
> - if (!tile->media_gt)
> - continue;
> + xe_device_wmb(xe);
>
> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> - &fence[fence_id],
> - true);
> -
> - ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
> - &fence[fence_id], vma);
> - if (ret)
> - goto wait;
> - ++fence_id;
> - }
> - }
> -
> -wait:
> - for (id = 0; id < fence_id; ++id)
> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> + ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
> + xe_vma_end(vma), tile_mask);
>
> vma->tile_invalidated = vma->tile_mask;
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 99e164852f63..1ef98113fa5b 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -228,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
> struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
> struct xe_svm_range *range);
>
> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
> + u64 end, u8 tile_mask);
> +
> int xe_vm_invalidate_vma(struct xe_vma *vma);
>
> int xe_vm_validate_protected(struct xe_vm *vm);
> --
> 2.34.1
>