From: Matthew Brost <matthew.brost@intel.com>
To: Tejas Upadhyay <tejas.upadhyay@intel.com>
Cc: <intel-xe@lists.freedesktop.org>, <matthew.auld@intel.com>,
<thomas.hellstrom@linux.intel.com>,
<himal.prasad.ghimiray@intel.com>
Subject: Re: [PATCH] drm/xe/svm: Use xe_map_resource_to_region helper instead of direct access
Date: Mon, 6 Apr 2026 13:23:44 -0700 [thread overview]
Message-ID: <adQWUJSpnkGCfe+X@gsse-cloud1.jf.intel.com> (raw)
In-Reply-To: <20260406054249.2769479-2-tejas.upadhyay@intel.com>
On Mon, Apr 06, 2026 at 11:12:50AM +0530, Tejas Upadhyay wrote:
> Renaming:
> The helper function res_to_mem_region is now xe_map_resource_to_region.
>
> Abstraction:
> The patch removes instances where block->private was accessed directly
> to obtain VRAM region data.
>
I'd include the 'why' as well — e.g.:
The patch removes instances where block->private was accessed directly
to obtain VRAM region data allowing VRAM manager to own block->private
for future use cases.
> Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
> ---
> drivers/gpu/drm/xe/xe_bo.c | 21 ++++-----------------
> drivers/gpu/drm/xe/xe_svm.c | 8 +-------
> drivers/gpu/drm/xe/xe_vram.c | 22 ++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vram.h | 2 ++
> 4 files changed, 29 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index a7c2dc7f224c..daac53168dba 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -173,19 +173,6 @@ mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
> return tile->migrate;
> }
>
> -static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
> -{
> - struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
> - struct ttm_resource_manager *mgr;
> - struct xe_ttm_vram_mgr *vram_mgr;
> -
> - xe_assert(xe, resource_is_vram(res));
> - mgr = ttm_manager_type(&xe->ttm, res->mem_type);
> - vram_mgr = to_xe_ttm_vram_mgr(mgr);
> -
> - return container_of(vram_mgr, struct xe_vram_region, ttm);
> -}
> -
> static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
> u32 bo_flags, u32 *c)
> {
> @@ -635,7 +622,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
> return 0;
> case XE_PL_VRAM0:
> case XE_PL_VRAM1: {
> - struct xe_vram_region *vram = res_to_mem_region(mem);
> + struct xe_vram_region *vram = xe_map_resource_to_region(mem);
>
> if (!xe_ttm_resource_visible(mem))
> return -EINVAL;
> @@ -1642,7 +1629,7 @@ static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
> if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
> return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
>
> - vram = res_to_mem_region(ttm_bo->resource);
> + vram = xe_map_resource_to_region(ttm_bo->resource);
> xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
> return (vram->io_start + cursor.start) >> PAGE_SHIFT;
> }
> @@ -1782,7 +1769,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
> goto out;
> }
>
> - vram = res_to_mem_region(ttm_bo->resource);
> + vram = xe_map_resource_to_region(ttm_bo->resource);
> xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
> xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);
>
> @@ -2923,7 +2910,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
> case XE_PL_SYSTEM:
> return 0;
> default:
> - return res_to_mem_region(res)->dpa_base;
> + return xe_map_resource_to_region(res)->dpa_base;
> }
> return 0;
> }
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 5933b2b6392b..71fbc75bd0da 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -790,7 +790,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
> int j = 0;
>
> list_for_each_entry(block, blocks, link) {
> - struct xe_vram_region *vr = block->private;
> + struct xe_vram_region *vr = xe_map_resource_to_region(res);
> struct gpu_buddy *buddy = vram_to_buddy(vr);
You can move 'vr' and 'buddy' to function-level variables since they are loop-invariant.
With the nits fixed:
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> u64 block_pfn = block_offset_to_pfn(devmem_allocation->dpagemap,
> gpu_buddy_block_offset(block));
> @@ -1061,9 +1061,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
> struct dma_fence *pre_migrate_fence = NULL;
> struct xe_device *xe = vr->xe;
> struct device *dev = xe->drm.dev;
> - struct gpu_buddy_block *block;
> struct xe_validation_ctx vctx;
> - struct list_head *blocks;
> struct drm_exec exec;
> struct xe_bo *bo;
> int err = 0, idx;
> @@ -1100,10 +1098,6 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
> &dpagemap_devmem_ops, dpagemap, end - start,
> pre_migrate_fence);
>
> - blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
> - list_for_each_entry(block, blocks, link)
> - block->private = vr;
> -
> xe_bo_get(bo);
>
> /* Ensure the device has a pm ref while there are device pages active. */
> diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
> index 0538dcb8b18c..23eb7edbdd57 100644
> --- a/drivers/gpu/drm/xe/xe_vram.c
> +++ b/drivers/gpu/drm/xe/xe_vram.c
> @@ -13,6 +13,7 @@
> #include "regs/xe_gt_regs.h"
> #include "regs/xe_regs.h"
> #include "xe_assert.h"
> +#include "xe_bo.h"
> #include "xe_device.h"
> #include "xe_force_wake.h"
> #include "xe_gt_mcr.h"
> @@ -249,6 +250,27 @@ static int vram_region_init(struct xe_device *xe, struct xe_vram_region *vram,
> return 0;
> }
>
> +/**
> + * xe_map_resource_to_region - Map ttm resource to vram memory region
> + * @res: The ttm resource
> + *
> + * Get vram memory region using vram memory manager managing this resource
> + *
> + * Returns: pointer to xe_vram_region
> + */
> +struct xe_vram_region *xe_map_resource_to_region(struct ttm_resource *res)
> +{
> + struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
> + struct ttm_resource_manager *mgr;
> + struct xe_ttm_vram_mgr *vram_mgr;
> +
> + xe_assert(xe, mem_type_is_vram(res->mem_type));
> + mgr = ttm_manager_type(&xe->ttm, res->mem_type);
> + vram_mgr = to_xe_ttm_vram_mgr(mgr);
> +
> + return container_of(vram_mgr, struct xe_vram_region, ttm);
> +}
> +
> /**
> * xe_vram_probe() - Probe VRAM configuration
> * @xe: the &xe_device
> diff --git a/drivers/gpu/drm/xe/xe_vram.h b/drivers/gpu/drm/xe/xe_vram.h
> index 72860f714fc6..dd1c8bf17922 100644
> --- a/drivers/gpu/drm/xe/xe_vram.h
> +++ b/drivers/gpu/drm/xe/xe_vram.h
> @@ -10,7 +10,9 @@
>
> struct xe_device;
> struct xe_vram_region;
> +struct ttm_resource;
>
> +struct xe_vram_region *xe_map_resource_to_region(struct ttm_resource *res);
> int xe_vram_probe(struct xe_device *xe);
>
> struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement);
> --
> 2.52.0
>
next prev parent reply other threads:[~2026-04-06 20:23 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-06 5:42 [PATCH] drm/xe/svm: Use xe_map_resource_to_region helper instead of direct access Tejas Upadhyay
2026-04-06 5:50 ` ✓ CI.KUnit: success for " Patchwork
2026-04-06 6:25 ` ✓ Xe.CI.BAT: " Patchwork
2026-04-06 7:34 ` ✓ Xe.CI.FULL: " Patchwork
2026-04-06 20:23 ` Matthew Brost [this message]
2026-04-07 5:31 ` [PATCH] " Upadhyay, Tejas
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=adQWUJSpnkGCfe+X@gsse-cloud1.jf.intel.com \
--to=matthew.brost@intel.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.auld@intel.com \
--cc=tejas.upadhyay@intel.com \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox