From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: Matthew Brost <matthew.brost@intel.com>
Cc: intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
himal.prasad.ghimiray@intel.com, apopple@nvidia.com,
airlied@gmail.com, "Simona Vetter" <simona.vetter@ffwll.ch>,
felix.kuehling@amd.com,
"Christian König" <christian.koenig@amd.com>,
dakr@kernel.org, "Mrozek, Michal" <michal.mrozek@intel.com>,
"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>
Subject: Re: [PATCH 10/15] drm/xe: Use the vma attibute drm_pagemap to select where to migrate
Date: Wed, 29 Oct 2025 15:56:34 +0100
Message-ID: <f92f89d247e296531640026d6226f1ead1a03b7c.camel@linux.intel.com>
In-Reply-To: <aQGJkJqG9Un1i3lP@lstrano-desk.jf.intel.com>
On Tue, 2025-10-28 at 20:27 -0700, Matthew Brost wrote:
> On Sat, Oct 25, 2025 at 02:04:07PM +0200, Thomas Hellström wrote:
> > Enable migrating to foreign drm_pagemaps.
> >
>
> This could use a slightly better commit message ahead of merging.
>
> > Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_svm.c      | 39 +++++++++++++++-----------------
> >  drivers/gpu/drm/xe/xe_svm.h      |  8 +++----
> >  drivers/gpu/drm/xe/xe_vm.c       | 19 ++++++----------
> >  drivers/gpu/drm/xe/xe_vm_types.h |  6 ++---
> >  4 files changed, 32 insertions(+), 40 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > index d27cedeaf70c..36a6ac293e71 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.c
> > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > @@ -1132,9 +1132,9 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> >  	if (err)
> >  		return err;
> >  
> > -	dpagemap = xe_vma_resolve_pagemap(vma, tile);
> > -	ctx.device_private_page_owner =
> > -		xe_svm_private_page_owner(vm, !dpagemap && !ctx.devmem_only);
> > +	dpagemap = ctx.devmem_only ? xe_tile_local_pagemap(tile) :
> > +		xe_vma_resolve_pagemap(vma, tile);
> > +	ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
> >  	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
> >  
> >  	if (IS_ERR(range))
> > @@ -1159,13 +1159,8 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> >  	    xe_svm_range_needs_migrate_to_vram(range, vma,
> >  					       !!dpagemap || ctx.devmem_only)) {
> >  		ktime_t migrate_start = xe_svm_stats_ktime_get();
> >  
> > -		/* TODO : For multi-device dpagemap will be used to find the
> > -		 * remote tile and remote device. Will need to modify
> > -		 * xe_svm_alloc_vram to use dpagemap for future multi-device
> > -		 * support.
> > -		 */
> >  		xe_svm_range_migrate_count_stats_incr(gt, range);
> > -		err = xe_svm_alloc_vram(tile, range, &ctx);
> > +		err = xe_svm_alloc_vram(range, &ctx, dpagemap);
> >  		xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
> >  		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
> >  		if (err) {
> > @@ -1482,7 +1477,13 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> >   */
> >  struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
> >  {
> > -	s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
> > +	struct drm_pagemap *dpagemap = vma->attr.preferred_loc.dpagemap;
> > +	s32 fd;
> > +
> > +	if (dpagemap)
> > +		return dpagemap;
> > +
> > +	fd = (s32)vma->attr.preferred_loc.devmem_fd;
> >  
> >  	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
> >  		return NULL;
> > @@ -1490,28 +1491,24 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
> >  	if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
> >  		return IS_DGFX(tile_to_xe(tile)) ?
> >  			xe_tile_local_pagemap(tile) : NULL;
> >  
> > -	/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
> >  	return NULL;
> >  }
> >
> >  /**
> >   * xe_svm_alloc_vram()- Allocate device memory pages for range,
> >   * migrating existing data.
> > - * @tile: tile to allocate vram from
> >   * @range: SVM range
> >   * @ctx: DRM GPU SVM context
> > + * @dpagemap: The struct drm_pagemap representing the memory to allocate.
> >   *
> >   * Return: 0 on success, error code on failure.
> >   */
> > -int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
> > -		      const struct drm_gpusvm_ctx *ctx)
> > +int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
> > +		      struct drm_pagemap *dpagemap)
> >  {
> > -	struct drm_pagemap *dpagemap;
> > -
> > -	xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
> > +	xe_assert(range_to_vm(&range->base)->xe, range->base.pages.flags.migrate_devmem);
> >  	range_debug(range, "ALLOCATE VRAM");
> >  
> > -	dpagemap = xe_tile_local_pagemap(tile);
> >  	return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
> >  				       xe_svm_range_end(range),
> >  				       range->base.gpusvm->mm,
> > @@ -1778,9 +1775,9 @@ int xe_pagemap_cache_create(struct xe_tile *tile)
> >  	return 0;
> >  }
> >  
> > -int xe_svm_alloc_vram(struct xe_tile *tile,
> > -		      struct xe_svm_range *range,
> > -		      const struct drm_gpusvm_ctx *ctx)
> > +int xe_svm_alloc_vram(struct xe_svm_range *range,
> > +		      const struct drm_gpusvm_ctx *ctx,
> > +		      struct drm_pagemap *dpagemap)
> >  {
> >  	return -EOPNOTSUPP;
> >  }
> > diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> > index 5adce108f7eb..c7027facf6e9 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.h
> > +++ b/drivers/gpu/drm/xe/xe_svm.h
> > @@ -94,8 +94,8 @@ int xe_svm_bo_evict(struct xe_bo *bo);
> >
> >  void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
> >  
> > -int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
> > -		      const struct drm_gpusvm_ctx *ctx);
> > +int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
> > +		      struct drm_pagemap *dpagemap);
> >  
> >  struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
> >  						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
> > @@ -276,8 +276,8 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
> >  }
> >  
> >  static inline int
> > -xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
> > -		  const struct drm_gpusvm_ctx *ctx)
> > +xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
> > +		  struct drm_pagemap *dpagemap)
> >  {
> >  	return -EOPNOTSUPP;
> >  }
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 3c3dc1b1ace9..381d4b4abac9 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -2355,18 +2355,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
> >  			if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
> >  				dpagemap = xe_vma_resolve_pagemap(vma,
> >  								  xe_device_get_root_tile(vm->xe));
> > -				/*
> > -				 * TODO: Once multigpu support is enabled will need
> > -				 * something to dereference tile from dpagemap.
> > -				 */
> > -				if (dpagemap)
> > -					tile = xe_device_get_root_tile(vm->xe);
> >  			} else if (prefetch_region) {
> >  				tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
> >  						      XE_PL_VRAM0];
> > +				dpagemap = xe_tile_local_pagemap(tile);
>
> Per the kernel test robot, dpagemap needs to be initialized to NULL.
> There is existing code that sets tile to NULL after a for_each_tile
> loop, which can also be dropped.
>
Yeah, I noticed that. I'll fix this up.
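
Roughly along these lines, as an untested sketch of what I have in mind
for vm_bind_ioctl_ops_create() (same names as in the patch above):

	struct drm_pagemap *dpagemap = NULL;
	struct xe_tile *tile = NULL;

	/* ... */

	if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
		/* Placement resolved from the vma attributes. */
		dpagemap = xe_vma_resolve_pagemap(vma,
						  xe_device_get_root_tile(vm->xe));
	} else if (prefetch_region) {
		/* Explicit region: use that tile's local pagemap. */
		tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
				      XE_PL_VRAM0];
		dpagemap = xe_tile_local_pagemap(tile);
	}
	/* A NULL dpagemap then consistently means "prefetch to smem". */

With dpagemap initialized up front, the tile = NULL reset after the
for_each_tile loop should indeed be droppable as well.
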
Thanks,
Thomas
> Everything else looks right.
>
> Matt
>
> >  			}
> >  
> > -			op->prefetch_range.tile = tile;
> > +			op->prefetch_range.dpagemap = dpagemap;
> >  alloc_next_range:
> >  			svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
> >
> > @@ -2897,7 +2892,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> >  {
> >  	bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
> >  	struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
> > -	struct xe_tile *tile = op->prefetch_range.tile;
> > +	struct drm_pagemap *dpagemap = op->prefetch_range.dpagemap;
> >  	int err = 0;
> >  
> >  	struct xe_svm_range *svm_range;
> > @@ -2910,15 +2905,15 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> >  	ctx.read_only = xe_vma_read_only(vma);
> >  	ctx.devmem_possible = devmem_possible;
> >  	ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
> > -	ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !tile);
> > +	ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
> >  
> >  	/* TODO: Threading the migration */
> >  	xa_for_each(&op->prefetch_range.range, i, svm_range) {
> > -		if (!tile)
> > +		if (!dpagemap)
> >  			xe_svm_range_migrate_to_smem(vm, svm_range);
> >  
> > -		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
> > -			err = xe_svm_alloc_vram(tile, svm_range, &ctx);
> > +		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!dpagemap)) {
> > +			err = xe_svm_alloc_vram(svm_range, &ctx, dpagemap);
> >  			if (err) {
> >  				drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
> >  					vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
> > diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> > index 70856d536047..5313bf2afa54 100644
> > --- a/drivers/gpu/drm/xe/xe_vm_types.h
> > +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> > @@ -413,10 +413,10 @@ struct xe_vma_op_prefetch_range {
> >  	/** @ranges_count: number of svm ranges to map */
> >  	u32 ranges_count;
> >  	/**
> > -	 * @tile: Pointer to the tile structure containing memory to prefetch.
> > -	 * NULL if prefetch requested region is smem
> > +	 * @dpagemap: Pointer to the dpagemap structure containing memory to prefetch.
> > +	 * NULL if prefetch requested region is smem
> >  	 */
> > -	struct xe_tile *tile;
> > +	struct drm_pagemap *dpagemap;
> >  };
> >
> > /** enum xe_vma_op_flags - flags for VMA operation */
> > --
> > 2.51.0
> >
Thread overview: 54+ messages
2025-10-25 12:03 [PATCH 00/15] Dynamic drm_pagemaps and Initial multi-device SVM Thomas Hellström
2025-10-25 12:03 ` [PATCH 01/15] drm/pagemap, drm/xe: Add refcounting to struct drm_pagemap Thomas Hellström
2025-10-29 0:31 ` Matthew Brost
2025-10-29 1:11 ` Matthew Brost
2025-10-29 14:51 ` Thomas Hellström
2025-10-25 12:03 ` [PATCH 02/15] drm/pagemap: Add a refcounted drm_pagemap backpointer to struct drm_pagemap_zdd Thomas Hellström
2025-10-29 0:33 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 03/15] drm/pagemap, drm/xe: Manage drm_pagemap provider lifetimes Thomas Hellström
2025-10-29 0:46 ` Matthew Brost
2025-10-29 14:49 ` Thomas Hellström
2025-10-30 2:46 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 04/15] drm/pagemap: Add a drm_pagemap cache and shrinker Thomas Hellström
2025-10-28 1:23 ` Matthew Brost
2025-10-28 9:46 ` Thomas Hellström
2025-10-28 10:29 ` Thomas Hellström
2025-10-28 18:38 ` Matthew Brost
2025-10-29 22:41 ` Matthew Brost
2025-10-29 22:48 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 05/15] drm/xe: Use the drm_pagemap cache and shrinker Thomas Hellström
2025-10-30 0:43 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 06/15] drm/pagemap: Remove the drm_pagemap_create() interface Thomas Hellström
2025-10-29 1:00 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 07/15] drm/pagemap_util: Add a utility to assign an owner to a set of interconnected gpus Thomas Hellström
2025-10-29 1:21 ` Matthew Brost
2025-10-29 14:52 ` Thomas Hellström
2025-10-25 12:04 ` [PATCH 08/15] drm/xe: Use the drm_pagemap_util helper to get a svm pagemap owner Thomas Hellström
2025-10-27 23:02 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 09/15] drm/xe: Pass a drm_pagemap pointer around with the memory advise attributes Thomas Hellström
2025-10-28 0:35 ` Matthew Brost
2025-11-26 0:31 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 10/15] drm/xe: Use the vma attibute drm_pagemap to select where to migrate Thomas Hellström
2025-10-25 18:01 ` kernel test robot
2025-10-29 3:27 ` Matthew Brost
2025-10-29 14:56 ` Thomas Hellström [this message]
2025-10-29 16:59 ` kernel test robot
2025-10-25 12:04 ` [PATCH 11/15] drm/xe: Simplify madvise_preferred_mem_loc() Thomas Hellström
2025-10-27 23:14 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 12/15] drm/xe/uapi: Extend the madvise functionality to support foreign pagemap placement for svm Thomas Hellström
2025-10-28 0:51 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 13/15] drm/xe: Support pcie p2p dma as a fast interconnect Thomas Hellström
2025-10-28 1:14 ` Matthew Brost
2025-10-28 9:32 ` Thomas Hellström
2025-10-29 2:17 ` Matthew Brost
2025-10-29 14:54 ` Thomas Hellström
2025-10-25 12:04 ` [PATCH 14/15] drm/xe/vm: Add a prefetch debug printout Thomas Hellström
2025-10-27 23:16 ` Matthew Brost
2025-10-25 12:04 ` [PATCH 15/15] drm/xe: Retry migration once Thomas Hellström
2025-10-28 0:13 ` Matthew Brost
2025-10-28 9:11 ` Thomas Hellström
2025-10-28 19:03 ` Matthew Brost
2025-10-25 12:16 ` ✗ CI.checkpatch: warning for Dynamic drm_pagemaps and Initial multi-device SVM Patchwork
2025-10-25 12:17 ` ✓ CI.KUnit: success " Patchwork
2025-10-25 13:06 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-25 14:14 ` ✗ Xe.CI.Full: failure " Patchwork