public inbox for intel-xe@lists.freedesktop.org
 help / color / mirror / Atom feed
From: Matthew Brost <matthew.brost@intel.com>
To: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Cc: intel-xe@lists.freedesktop.org,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	"Maarten Lankhorst" <dev@lankhorst.se>,
	"Michal Wajdeczko" <michal.wajdeczko@intel.com>
Subject: Re: [PATCH 1/3] drm/xe/mm: add XE DRM MM manager with shadow support
Date: Thu, 26 Mar 2026 12:48:18 -0700	[thread overview]
Message-ID: <acWNgikBoPxRNHGJ@gsse-cloud1.jf.intel.com> (raw)
In-Reply-To: <20260320121231.638189-2-satyanarayana.k.v.p@intel.com>

On Fri, Mar 20, 2026 at 12:12:29PM +0000, Satyanarayana K V P wrote:
> Add a xe_drm_mm manager to allocate sub-ranges from a BO-backed pool
> using drm_mm.
> 
> Signed-off-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> Cc: Maarten Lankhorst <dev@lankhorst.se>
> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
> ---
>  drivers/gpu/drm/xe/Makefile          |   1 +
>  drivers/gpu/drm/xe/xe_drm_mm.c       | 200 +++++++++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_drm_mm.h       |  55 ++++++++
>  drivers/gpu/drm/xe/xe_drm_mm_types.h |  42 ++++++
>  4 files changed, 298 insertions(+)
>  create mode 100644 drivers/gpu/drm/xe/xe_drm_mm.c
>  create mode 100644 drivers/gpu/drm/xe/xe_drm_mm.h
>  create mode 100644 drivers/gpu/drm/xe/xe_drm_mm_types.h
> 
> diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> index dab979287a96..6ab4e2392df1 100644
> --- a/drivers/gpu/drm/xe/Makefile
> +++ b/drivers/gpu/drm/xe/Makefile
> @@ -41,6 +41,7 @@ xe-y += xe_bb.o \
>  	xe_device_sysfs.o \
>  	xe_dma_buf.o \
>  	xe_drm_client.o \
> +	xe_drm_mm.o \
>  	xe_drm_ras.o \
>  	xe_eu_stall.o \
>  	xe_exec.o \
> diff --git a/drivers/gpu/drm/xe/xe_drm_mm.c b/drivers/gpu/drm/xe/xe_drm_mm.c
> new file mode 100644
> index 000000000000..c5b1766fa75a
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_drm_mm.c
> @@ -0,0 +1,200 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2026 Intel Corporation
> + */
> +
> +#include <drm/drm_managed.h>
> +#include <linux/kernel.h>
> +
> +#include "xe_device_types.h"
> +#include "xe_drm_mm_types.h"
> +#include "xe_drm_mm.h"
> +#include "xe_map.h"
> +
> +static void xe_drm_mm_manager_fini(struct drm_device *drm, void *arg)
> +{
> +	struct xe_drm_mm_manager *drm_mm_manager = arg;
> +	struct xe_bo *bo = drm_mm_manager->bo;
> +
> +	if (!bo) {
> +		drm_err(drm, "no bo for drm mm manager\n");
> +		return;
> +	}
> +
> +	drm_mm_takedown(&drm_mm_manager->base);
> +
> +	if (drm_mm_manager->is_iomem)
> +		kvfree(drm_mm_manager->cpu_addr);
> +
> +	drm_mm_manager->bo = NULL;
> +	drm_mm_manager->shadow = NULL;
> +}
> +
> +/**
> + * xe_drm_mm_manager_init() - Create and initialize the DRM MM manager.
> + * @tile: the &xe_tile where allocate.
> + * @size: number of bytes to allocate
> + * @guard: number of bytes to exclude from allocation for guard region
> + * @flags: additional flags for configuring the DRM MM manager.
> + *
> + * Initializes a DRM MM manager for managing memory allocations on a specific
> + * XE tile. The function allocates a buffer object to back the memory region
> + * managed by the DRM MM manager.
> + *
> + * Return: a pointer to the &xe_drm_mm_manager, or an error pointer on failure.
> + */
> +struct xe_drm_mm_manager *xe_drm_mm_manager_init(struct xe_tile *tile, u32 size,
> +						 u32 guard, u32 flags)
> +{
> +	struct xe_device *xe = tile_to_xe(tile);
> +	struct xe_drm_mm_manager *drm_mm_manager;
> +	u64 managed_size;
> +	struct xe_bo *bo;
> +	int ret;
> +
> +	xe_tile_assert(tile, size > guard);
> +	managed_size = size - guard;
> +
> +	drm_mm_manager = drmm_kzalloc(&xe->drm, sizeof(*drm_mm_manager), GFP_KERNEL);
> +	if (!drm_mm_manager)
> +		return ERR_PTR(-ENOMEM);
> +
> +	bo = xe_managed_bo_create_pin_map(xe, tile, size,
> +					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
> +					  XE_BO_FLAG_GGTT |
> +					  XE_BO_FLAG_GGTT_INVALIDATE |
> +					  XE_BO_FLAG_PINNED_NORESTORE);
> +	if (IS_ERR(bo)) {
> +		drm_err(&xe->drm, "Failed to prepare %uKiB BO for DRM MM manager (%pe)\n",
> +			size / SZ_1K, bo);
> +		return ERR_CAST(bo);
> +	}
> +	drm_mm_manager->bo = bo;
> +	drm_mm_manager->is_iomem = bo->vmap.is_iomem;
> +
> +	if (bo->vmap.is_iomem) {
> +		drm_mm_manager->cpu_addr = kvzalloc(managed_size, GFP_KERNEL);
> +		if (!drm_mm_manager->cpu_addr)
> +			return ERR_PTR(-ENOMEM);
> +	} else {
> +		drm_mm_manager->cpu_addr = bo->vmap.vaddr;
> +		memset(drm_mm_manager->cpu_addr, 0, bo->ttm.base.size);

I don't think you need this memset... As in alloc_bb_pool() (patch
#3), the first thing done after calling xe_drm_mm_manager_init() is a
xe_map_memset(..., MI_NOOP, ...) on both the primary BO and shadow.

> +	}
> +
> +	if (flags & XE_DRM_MM_BO_MANAGER_FLAG_SHADOW) {
> +		struct xe_bo *shadow;
> +
> +		ret = drmm_mutex_init(&xe->drm, &drm_mm_manager->swap_guard);
> +		if (ret)
> +			return ERR_PTR(ret);
> +		if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
> +			fs_reclaim_acquire(GFP_KERNEL);
> +			might_lock(&drm_mm_manager->swap_guard);
> +			fs_reclaim_release(GFP_KERNEL);
> +		}
> +
> +		shadow = xe_managed_bo_create_pin_map(xe, tile, size,
> +						      XE_BO_FLAG_VRAM_IF_DGFX(tile) |
> +						      XE_BO_FLAG_GGTT |
> +						      XE_BO_FLAG_GGTT_INVALIDATE |
> +						      XE_BO_FLAG_PINNED_NORESTORE);
> +		if (IS_ERR(shadow)) {
> +			drm_err(&xe->drm,
> +				"Failed to prepare %uKiB shadow BO for DRM MM manager (%pe)\n",
> +				size / SZ_1K, shadow);
> +			return ERR_CAST(shadow);
> +		}
> +		drm_mm_manager->shadow = shadow;
> +	}
> +
> +	drm_mm_init(&drm_mm_manager->base, 0, managed_size);
> +	ret = drmm_add_action_or_reset(&xe->drm, xe_drm_mm_manager_fini, drm_mm_manager);
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	return drm_mm_manager;
> +}
> +
> +/**
> + * xe_drm_mm_bo_swap_shadow() - Swap the primary BO with the shadow BO.
> + * @drm_mm_manager: the DRM MM manager containing the primary and shadow BOs.
> + *
> + * Swaps the primary buffer object with the shadow buffer object in the DRM MM
> + * manager.
> + *
> + * Return: None.
> + */
> +void xe_drm_mm_bo_swap_shadow(struct xe_drm_mm_manager *drm_mm_manager)
> +{
> +	struct xe_device *xe = tile_to_xe(drm_mm_manager->bo->tile);
> +
> +	xe_assert(xe, drm_mm_manager->shadow);
> +	lockdep_assert_held(&drm_mm_manager->swap_guard);
> +
> +	swap(drm_mm_manager->bo, drm_mm_manager->shadow);
> +	if (!drm_mm_manager->bo->vmap.is_iomem)
> +		drm_mm_manager->cpu_addr = drm_mm_manager->bo->vmap.vaddr;
> +}
> +
> +/**
> + * xe_drm_mm_sync_shadow() - Synchronize the shadow BO with the primary BO.
> + * @drm_mm_manager: the DRM MM manager containing the primary and shadow BOs.
> + * @node: the DRM MM node representing the region to synchronize.
> + *
> + * Copies the contents of the specified region from the primary buffer object to
> + * the shadow buffer object in the DRM MM manager.
> + *
> + * Return: None.
> + */
> +void xe_drm_mm_sync_shadow(struct xe_drm_mm_manager *drm_mm_manager,
> +			   struct drm_mm_node *node)
> +{
> +	struct xe_device *xe = tile_to_xe(drm_mm_manager->bo->tile);
> +
> +	xe_assert(xe, drm_mm_manager->shadow);
> +	lockdep_assert_held(&drm_mm_manager->swap_guard);
> +
> +	xe_map_memcpy_to(xe, &drm_mm_manager->shadow->vmap,
> +			 node->start,
> +			 drm_mm_manager->cpu_addr + node->start,
> +			 node->size);
> +}
> +
> +/**
> + * xe_drm_mm_insert_node() - Insert a node into the DRM MM manager.
> + * @drm_mm_manager: the DRM MM manager to insert the node into.
> + * @node: the DRM MM node to insert.
> + * @size: the size of the node to insert.
> + *
> + * Inserts a node into the DRM MM manager and clears the corresponding memory region
> + * in both the primary and shadow buffer objects.
> + *
> + * Return: 0 on success, or a negative error code on failure.
> + */
> +int xe_drm_mm_insert_node(struct xe_drm_mm_manager *drm_mm_manager,
> +			  struct drm_mm_node *node, u32 size)
> +{
> +	struct drm_mm *mm = &drm_mm_manager->base;
> +	int ret;
> +
> +	ret = drm_mm_insert_node(mm, node, size);
> +	if (ret)
> +		return ret;
> +
> +	memset((void *)drm_mm_manager->bo->vmap.vaddr + node->start, 0, node->size);
> +	if (drm_mm_manager->shadow)
> +		memset((void *)drm_mm_manager->shadow->vmap.vaddr + node->start, 0,
> +		       node->size);

Likewise here, I don't think you need these memsets as both the primary
and shadow are initialized with MI_NOOP, and on each SA release the
region is set back to MI_NOOP.

Other than that, patch looks good.

Matt

> +	return 0;
> +}
> +
> +/**
> + * xe_drm_mm_remove_node() - Remove a node from the DRM MM manager.
> + * @node: the DRM MM node to remove.
> + *
> + * Return: None.
> + */
> +void xe_drm_mm_remove_node(struct drm_mm_node *node)
> +{
> +	return drm_mm_remove_node(node);
> +}
> diff --git a/drivers/gpu/drm/xe/xe_drm_mm.h b/drivers/gpu/drm/xe/xe_drm_mm.h
> new file mode 100644
> index 000000000000..aeb7cab92d0b
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_drm_mm.h
> @@ -0,0 +1,55 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2026 Intel Corporation
> + */
> +#ifndef _XE_DRM_MM_H_
> +#define _XE_DRM_MM_H_
> +
> +#include <linux/sizes.h>
> +#include <linux/types.h>
> +
> +#include "xe_bo.h"
> +#include "xe_drm_mm_types.h"
> +
> +struct dma_fence;
> +struct xe_tile;
> +
> +#define XE_DRM_MM_BO_MANAGER_FLAG_SHADOW    BIT(0)
> +
> +struct xe_drm_mm_manager *xe_drm_mm_manager_init(struct xe_tile *tile, u32 size,
> +						 u32 guard, u32 flags);
> +void xe_drm_mm_bo_swap_shadow(struct xe_drm_mm_manager *drm_mm_manager);
> +void xe_drm_mm_sync_shadow(struct xe_drm_mm_manager *drm_mm_manager,
> +			   struct drm_mm_node *node);
> +int xe_drm_mm_insert_node(struct xe_drm_mm_manager *drm_mm_manager,
> +			  struct drm_mm_node *node, u32 size);
> +void xe_drm_mm_remove_node(struct drm_mm_node *node);
> +
> +/**
> + * xe_drm_mm_manager_gpu_addr() - Retrieve GPU address of a back storage BO
> + * within a memory manager.
> + * @drm_mm_manager: The DRM MM memory manager.
> + *
> + * Returns: GGTT address of the back storage BO
> + */
> +static inline u64 xe_drm_mm_manager_gpu_addr(struct xe_drm_mm_manager
> +					     *drm_mm_manager)
> +{
> +	return xe_bo_ggtt_addr(drm_mm_manager->bo);
> +}
> +
> +/**
> + * xe_drm_mm_bo_swap_guard() - Retrieve the mutex used to guard swap operations
> + * on a memory manager.
> + * @drm_mm_manager: The DRM MM memory manager.
> + *
> + * Returns: Swap guard mutex.
> + */
> +static inline struct mutex *xe_drm_mm_bo_swap_guard(struct xe_drm_mm_manager
> +						    *drm_mm_manager)
> +{
> +	return &drm_mm_manager->swap_guard;
> +}
> +
> +#endif
> +
> diff --git a/drivers/gpu/drm/xe/xe_drm_mm_types.h b/drivers/gpu/drm/xe/xe_drm_mm_types.h
> new file mode 100644
> index 000000000000..69e0937dd8de
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_drm_mm_types.h
> @@ -0,0 +1,42 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2026 Intel Corporation
> + */
> +
> +#ifndef _XE_DRM_MM_TYPES_H_
> +#define _XE_DRM_MM_TYPES_H_
> +
> +#include <drm/drm_mm.h>
> +
> +struct xe_bo;
> +
> +struct xe_drm_mm_manager {
> +	/** @base: Range allocator over [0, @size) in bytes */
> +	struct drm_mm base;
> +	/** @bo: Active pool BO (GGTT-pinned, CPU-mapped). */
> +	struct xe_bo *bo;
> +	/** @shadow: Shadow BO for atomic command updates. */
> +	struct xe_bo *shadow;
> +	/** @swap_guard: Timeline guard updating @bo and @shadow */
> +	struct mutex swap_guard;
> +	/** @cpu_addr: CPU virtual address of the active BO. */
> +	void *cpu_addr;
> +	/** @size: Total size of the managed address space. */
> +	u64 size;
> +	/** @is_iomem: Whether the managed address space is I/O memory. */
> +	bool is_iomem;
> +};
> +
> +struct xe_drm_mm_bb {
> +	/** @node: Range node for this batch buffer. */
> +	struct drm_mm_node node;
> +	/** @manager: Manager this batch buffer belongs to. */
> +	struct xe_drm_mm_manager *manager;
> +	/** @cs: Command stream for this batch buffer. */
> +	u32 *cs;
> +	/** @len: Length of the CS in dwords. */
> +	u32 len;
> +};
> +
> +#endif
> +
> -- 
> 2.43.0
> 

  reply	other threads:[~2026-03-26 19:48 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-20 12:12 [PATCH 0/3] USE drm mm instead of drm SA for CCS read/write Satyanarayana K V P
2026-03-20 12:12 ` [PATCH 1/3] drm/xe/mm: add XE DRM MM manager with shadow support Satyanarayana K V P
2026-03-26 19:48   ` Matthew Brost [this message]
2026-03-26 19:57   ` Thomas Hellström
2026-03-27 10:54     ` Michal Wajdeczko
2026-03-27 11:06       ` Thomas Hellström
2026-03-27 19:54         ` Matthew Brost
2026-03-27 21:26       ` Matthew Brost
2026-03-20 12:12 ` [PATCH 2/3] drm/xe/mm: Add batch buffer allocation functions for xe_drm_mm manager Satyanarayana K V P
2026-03-26 19:50   ` Matthew Brost
2026-03-20 12:12 ` [PATCH 3/3] drm/xe/vf: Use drm mm instead of drm sa for CCS read/write Satyanarayana K V P
2026-03-26 19:52   ` Matthew Brost
2026-03-27 11:07   ` Michal Wajdeczko
2026-03-27 11:17     ` K V P, Satyanarayana
2026-03-27 11:47       ` Michal Wajdeczko
2026-03-27 20:07         ` Matthew Brost
2026-03-20 12:17 ` ✗ CI.checkpatch: warning for USE drm mm instead of drm SA " Patchwork
2026-03-20 12:19 ` ✓ CI.KUnit: success " Patchwork
2026-03-20 13:08 ` ✓ Xe.CI.BAT: " Patchwork
2026-03-21 11:52 ` ✗ Xe.CI.FULL: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=acWNgikBoPxRNHGJ@gsse-cloud1.jf.intel.com \
    --to=matthew.brost@intel.com \
    --cc=dev@lankhorst.se \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=michal.wajdeczko@intel.com \
    --cc=satyanarayana.k.v.p@intel.com \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox