Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Oak Zeng <oak.zeng@intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [CI 25/43] drm/xe: Use drm_mem_region for xe
Date: Tue, 11 Jun 2024 22:25:47 -0400	[thread overview]
Message-ID: <20240612022605.385062-25-oak.zeng@intel.com> (raw)
In-Reply-To: <20240612022605.385062-1-oak.zeng@intel.com>

drm_mem_region was introduced to move some memory management
code to the drm layer so it can be shared between different
vendor drivers. This patch applies the drm_mem_region concept
to the xekmd driver.

drm_mem_region is the parent class of xe_mem_region. Some
xe_mem_region members such as dpa_base are deleted as they
are already in the parent class.

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Brian Welty <brian.welty@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Oak Zeng <oak.zeng@intel.com>
---
 drivers/gpu/drm/xe/display/xe_fb_pin.c        |  2 +-
 drivers/gpu/drm/xe/display/xe_plane_initial.c |  2 +-
 drivers/gpu/drm/xe/xe_bo.c                    |  6 +++---
 drivers/gpu/drm/xe/xe_device_types.h          | 11 ++---------
 drivers/gpu/drm/xe/xe_migrate.c               |  8 ++++----
 drivers/gpu/drm/xe/xe_query.c                 |  2 +-
 drivers/gpu/drm/xe/xe_tile.c                  |  2 +-
 drivers/gpu/drm/xe/xe_ttm_vram_mgr.c          |  2 +-
 drivers/gpu/drm/xe/xe_vram.c                  | 12 ++++++------
 9 files changed, 20 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index a2f417209124..5c4590c62c08 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -272,7 +272,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
 		 * accessible.  This is important on small-bar systems where
 		 * only some subset of VRAM is CPU accessible.
 		 */
-		if (tile->mem.vram.io_size < tile->mem.vram.usable_size) {
+		if (tile->mem.vram.io_size < tile->mem.vram.drm_mr.usable_size) {
 			ret = -EINVAL;
 			goto err;
 		}
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index e135b20962d9..c2c079a2b133 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -86,7 +86,7 @@ initial_plane_bo(struct xe_device *xe,
 		 * We don't currently expect this to ever be placed in the
 		 * stolen portion.
 		 */
-		if (phys_base >= tile0->mem.vram.usable_size) {
+		if (phys_base >= tile0->mem.vram.drm_mr.usable_size) {
 			drm_err(&xe->drm,
 				"Initial plane programming using invalid range, phys_base=%pa\n",
 				&phys_base);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index d2763448ebaa..17afc18e413e 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -173,7 +173,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 	xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
 
 	vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
-	xe_assert(xe, vram && vram->usable_size);
+	xe_assert(xe, vram && vram->drm_mr.usable_size);
 	io_size = vram->io_size;
 
 	/*
@@ -184,7 +184,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 			XE_BO_FLAG_GGTT))
 		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
 
-	if (io_size < vram->usable_size) {
+	if (io_size < vram->drm_mr.usable_size) {
 		if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
 			place.fpfn = 0;
 			place.lpfn = io_size >> PAGE_SHIFT;
@@ -1638,7 +1638,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 	if (res->mem_type == XE_PL_STOLEN)
 		return xe_ttm_stolen_gpu_offset(xe);
 
-	return res_to_mem_region(res)->dpa_base;
+	return res_to_mem_region(res)->drm_mr.dpa_base;
 }
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index f1c09824b145..cf61b52e6d84 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -11,6 +11,7 @@
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
 #include <drm/ttm/ttm_device.h>
+#include <drm/drm_svm.h>
 
 #include "xe_devcoredump_types.h"
 #include "xe_heci_gsc.h"
@@ -69,6 +70,7 @@ struct xe_pat_ops;
  * device, such as HBM memory or CXL extension memory.
  */
 struct xe_mem_region {
+	struct drm_mem_region drm_mr;
 	/** @io_start: IO start address of this VRAM instance */
 	resource_size_t io_start;
 	/**
@@ -81,15 +83,6 @@ struct xe_mem_region {
 	 * configuration is known as small-bar.
 	 */
 	resource_size_t io_size;
-	/** @dpa_base: This memory regions's DPA (device physical address) base */
-	resource_size_t dpa_base;
-	/**
-	 * @usable_size: usable size of VRAM
-	 *
-	 * Usable size of VRAM excluding reserved portions
-	 * (e.g stolen mem)
-	 */
-	resource_size_t usable_size;
 	/**
 	 * @actual_physical_size: Actual VRAM size
 	 *
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 0457693315e6..cc8455daa2bb 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -126,7 +126,7 @@ static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
 	 * Remove the DPA to get a correct offset into identity table for the
 	 * migrate offset
 	 */
-	addr -= xe->mem.vram.dpa_base;
+	addr -= xe->mem.vram.drm_mr.dpa_base;
 	return addr + (256ULL << xe_pt_shift(2));
 }
 
@@ -256,21 +256,21 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 		u64 pos, ofs, flags;
 		/* XXX: Unclear if this should be usable_size? */
 		u64 vram_limit =  xe->mem.vram.actual_physical_size +
-			xe->mem.vram.dpa_base;
+			xe->mem.vram.drm_mr.dpa_base;
 
 		level = 2;
 		ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
 		flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
 						    true, 0);
 
-		xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+		xe_assert(xe, IS_ALIGNED(xe->mem.vram.drm_mr.usable_size, SZ_2M));
 
 		/*
 		 * Use 1GB pages when possible, last chunk always use 2M
 		 * pages as mixing reserved memory (stolen, WOCPM) with a single
 		 * mapping is not allowed on certain platforms.
 		 */
-		for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+		for (pos = xe->mem.vram.drm_mr.dpa_base; pos < vram_limit;
 		     pos += SZ_1G, ofs += 8) {
 			if (pos + SZ_1G >= vram_limit) {
 				u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 995effcb904b..8b3d63420cef 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -334,7 +334,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
 	config->num_params = num_params;
 	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
 		xe->info.devid | (xe->info.revid << 16);
-	if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
+	if (xe_device_get_root_tile(xe)->mem.vram.drm_mr.usable_size)
 		config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
 			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
 	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 15ea0a942f67..109f3118e821 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -132,7 +132,7 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
 	struct xe_device *xe = tile_to_xe(tile);
 	int err;
 
-	if (tile->mem.vram.usable_size) {
+	if (tile->mem.vram.drm_mr.usable_size) {
 		err = xe_ttm_vram_mgr_init(tile, tile->mem.vram_mgr);
 		if (err)
 			return err;
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index fe3779fdba2c..dd31b24fb07d 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -364,7 +364,7 @@ int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
 
 	mgr->vram = vram;
 	return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
-				      vram->usable_size, vram->io_size,
+				      vram->drm_mr.usable_size, vram->io_size,
 				      PAGE_SIZE);
 }
 
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 5bcd59190353..fff18517a9f9 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -150,7 +150,7 @@ static int determine_lmem_bar_size(struct xe_device *xe)
 		return -EIO;
 
 	/* XXX: Need to change when xe link code is ready */
-	xe->mem.vram.dpa_base = 0;
+	xe->mem.vram.drm_mr.dpa_base = 0;
 
 	/* set up a map to the total memory area. */
 	xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
@@ -333,16 +333,16 @@ int xe_vram_probe(struct xe_device *xe)
 			return -ENODEV;
 		}
 
-		tile->mem.vram.dpa_base = xe->mem.vram.dpa_base + tile_offset;
-		tile->mem.vram.usable_size = vram_size;
+		tile->mem.vram.drm_mr.dpa_base = xe->mem.vram.drm_mr.dpa_base + tile_offset;
+		tile->mem.vram.drm_mr.usable_size = vram_size;
 		tile->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
 
-		if (tile->mem.vram.io_size < tile->mem.vram.usable_size)
+		if (tile->mem.vram.io_size < tile->mem.vram.drm_mr.usable_size)
 			drm_info(&xe->drm, "Small BAR device\n");
 		drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
-			 tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
+			 tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.drm_mr.usable_size, &tile->mem.vram.io_size);
 		drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
-			 &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
+			 &tile->mem.vram.drm_mr.dpa_base, tile->mem.vram.drm_mr.dpa_base + (u64)tile->mem.vram.actual_physical_size,
 			 &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
 
 		/* calculate total size using tile size to get the correct HW sizing */
-- 
2.26.3


  parent reply	other threads:[~2024-06-12  2:16 UTC|newest]

Thread overview: 46+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-06-12  2:25 [CI 01/43] mm/hmm: let users to tag specific PFNs Oak Zeng
2024-06-12  2:25 ` [CI 02/43] dma-mapping: provide an interface to allocate IOVA Oak Zeng
2024-06-12  2:25 ` [CI 03/43] dma-mapping: provide callbacks to link/unlink pages to specific IOVA Oak Zeng
2024-06-12  2:25 ` [CI 04/43] iommu/dma: Provide an interface to allow preallocate IOVA Oak Zeng
2024-06-12  2:25 ` [CI 05/43] iommu/dma: Prepare map/unmap page functions to receive IOVA Oak Zeng
2024-06-12  2:25 ` [CI 06/43] iommu/dma: Implement link/unlink page callbacks Oak Zeng
2024-06-12  2:25 ` [CI 07/43] drm: move xe_sg_segment_size to drm layer Oak Zeng
2024-06-12  2:25 ` [CI 08/43] drm: Move GPUVA_START/LAST to drm_gpuvm.h Oak Zeng
2024-06-12  2:25 ` [CI 09/43] drm/svm: Mark drm_gpuvm to participate SVM Oak Zeng
2024-06-12  2:25 ` [CI 10/43] drm/svm: introduce drm_mem_region concept Oak Zeng
2024-06-12  2:25 ` [CI 11/43] drm/svm: introduce hmmptr and helper functions Oak Zeng
2024-06-12  2:25 ` [CI 12/43] drm/svm: Introduce helper to remap drm memory region Oak Zeng
2024-06-12  2:25 ` [CI 13/43] drm/svm: handle CPU page fault Oak Zeng
2024-06-12  2:25 ` [CI 14/43] drm/svm: Migrate a range of hmmptr to vram Oak Zeng
2024-06-12  2:25 ` [CI 15/43] drm/svm: Add DRM SVM documentation Oak Zeng
2024-06-12  2:25 ` [CI 16/43] drm/xe: s/xe_tile_migrate_engine/xe_tile_migrate_exec_queue Oak Zeng
2024-06-12  2:25 ` [CI 17/43] drm/xe: Add xe_vm_pgtable_update_op to xe_vma_ops Oak Zeng
2024-06-12  2:25 ` [CI 18/43] drm/xe: Convert multiple bind ops into single job Oak Zeng
2024-06-12  2:25 ` [CI 19/43] drm/xe: Update VM trace events Oak Zeng
2024-06-12  2:25 ` [CI 20/43] drm/xe: Update PT layer with better error handling Oak Zeng
2024-06-12  2:25 ` [CI 21/43] drm/xe: Retry BO allocation Oak Zeng
2024-06-12  2:25 ` [CI 22/43] drm/xe/uapi: Add DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATOR flag Oak Zeng
2024-06-12  2:25 ` [CI 23/43] drm/xe: Add a helper to calculate userptr end address Oak Zeng
2024-06-12  2:25 ` [CI 24/43] drm/xe: Add dma_addr res cursor Oak Zeng
2024-06-12  2:25 ` Oak Zeng [this message]
2024-06-12  2:25 ` [CI 26/43] drm/xe: use drm_hmmptr in xe Oak Zeng
2024-06-12  2:25 ` [CI 27/43] drm/xe: Moving to range based vma invalidation Oak Zeng
2024-06-12  2:25 ` [CI 28/43] drm/xe: Support range based page table update Oak Zeng
2024-06-12  2:25 ` [CI 29/43] drm/xe/uapi: Add DRM_XE_VM_CREATE_FLAG_PARTICIPATE_SVM flag Oak Zeng
2024-06-12  2:25 ` [CI 30/43] drm/xe/svm: Create userptr if page fault occurs on system_allocator VMA Oak Zeng
2024-06-12  2:25 ` [CI 31/43] drm/xe/svm: Add faulted userptr VMA garbage collector Oak Zeng
2024-06-12  2:25 ` [CI 32/43] drm/xe: Introduce helper to get tile from memory region Oak Zeng
2024-06-12  2:25 ` [CI 33/43] drm/xe/svm: implement functions to allocate and free device memory Oak Zeng
2024-06-12  2:25 ` [CI 34/43] drm/xe/svm: Get drm device from drm memory region Oak Zeng
2024-06-12  2:25 ` [CI 35/43] drm/xe/svm: Get page map owner of a " Oak Zeng
2024-06-12  2:25 ` [CI 36/43] drm/xe/svm: Add migrate layer functions for SVM support Oak Zeng
2024-06-12  2:25 ` [CI 37/43] drm/xe/svm: introduce svm migration function Oak Zeng
2024-06-12  2:26 ` [CI 38/43] drm/xe/svm: Register xe memory region to drm layer Oak Zeng
2024-06-12  2:26 ` [CI 39/43] drm/xe/svm: Introduce DRM_XE_SVM kernel config Oak Zeng
2024-06-12  2:26 ` [CI 40/43] drm/xe/svm: Migration from sram to vram for system allocator Oak Zeng
2024-06-12  2:26 ` [CI 41/43] drm/xe/svm: Determine a vma is backed by device memory Oak Zeng
2024-06-12  2:26 ` [CI 42/43] drm/xe/svm: Introduce hmm_pfn array based resource cursor Oak Zeng
2024-06-12  2:26 ` [CI 43/43] drm/xe: Enable system allocator uAPI Oak Zeng
2024-06-12  3:14 ` ✓ CI.Patch_applied: success for series starting with [CI,01/43] mm/hmm: let users to tag specific PFNs Patchwork
2024-06-12  3:15 ` ✗ CI.checkpatch: warning " Patchwork
2024-06-12  3:16 ` ✗ CI.KUnit: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240612022605.385062-25-oak.zeng@intel.com \
    --to=oak.zeng@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox