intel-xe.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/9] drm/xe: Remove useless mem_access during probe
@ 2024-03-04 18:21 Rodrigo Vivi
  2024-03-04 18:21 ` [PATCH 2/9] drm/xe: Convert xe_gem_fault to use direct xe_pm_runtime calls Rodrigo Vivi
                   ` (16 more replies)
  0 siblings, 17 replies; 33+ messages in thread
From: Rodrigo Vivi @ 2024-03-04 18:21 UTC (permalink / raw)
  To: intel-xe; +Cc: matthew.auld, Rodrigo Vivi

xe_pm_init is the very last thing during xe_pci_probe(),
hence these protections are useless from the point of view
of ensuring that the device is awake.

Let's remove them so we continue towards the goal of killing
xe_device_mem_access.

v2: Adding more cases

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 drivers/gpu/drm/xe/xe_ggtt.c |  2 --
 drivers/gpu/drm/xe/xe_gt.c   |  9 ---------
 drivers/gpu/drm/xe/xe_tile.c | 10 +++-------
 drivers/gpu/drm/xe/xe_uc.c   | 11 -----------
 4 files changed, 3 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 717d0e76277a..355e4bb987cb 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -206,14 +206,12 @@ static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
 	u64 start, end;
 
 	/* Display may have allocated inside ggtt, so be careful with clearing here */
-	xe_device_mem_access_get(tile_to_xe(ggtt->tile));
 	mutex_lock(&ggtt->lock);
 	drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
 		xe_ggtt_clear(ggtt, start, end - start);
 
 	xe_ggtt_invalidate(ggtt);
 	mutex_unlock(&ggtt->lock);
-	xe_device_mem_access_put(tile_to_xe(ggtt->tile));
 }
 
 int xe_ggtt_init(struct xe_ggtt *ggtt)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 85408e7a932b..063b710a8c7b 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -347,7 +347,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
 {
 	int err, i;
 
-	xe_device_mem_access_get(gt_to_xe(gt));
 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 	if (err)
 		goto err_hw_fence_irq;
@@ -389,7 +388,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
 
 	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 	XE_WARN_ON(err);
-	xe_device_mem_access_put(gt_to_xe(gt));
 
 	return 0;
 
@@ -399,7 +397,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
 err_hw_fence_irq:
 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
-	xe_device_mem_access_put(gt_to_xe(gt));
 
 	return err;
 }
@@ -408,7 +405,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
 {
 	int err, i;
 
-	xe_device_mem_access_get(gt_to_xe(gt));
 	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 	if (err)
 		goto err_hw_fence_irq;
@@ -474,7 +470,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
 
 	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
 	XE_WARN_ON(err);
-	xe_device_mem_access_put(gt_to_xe(gt));
 
 	return 0;
 
@@ -483,7 +478,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
 err_hw_fence_irq:
 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
-	xe_device_mem_access_put(gt_to_xe(gt));
 
 	return err;
 }
@@ -496,7 +490,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
 {
 	int err;
 
-	xe_device_mem_access_get(gt_to_xe(gt));
 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 	if (err)
 		goto out;
@@ -519,8 +512,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
 out_fw:
 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 out:
-	xe_device_mem_access_put(gt_to_xe(gt));
-
 	return err;
 }
 
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 044c20881de7..74ecb5f39438 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -160,23 +160,19 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
 {
 	int err;
 
-	xe_device_mem_access_get(tile_to_xe(tile));
-
 	err = tile_ttm_mgr_init(tile);
 	if (err)
-		goto err_mem_access;
+		return err;
 
 	tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
 	if (IS_ERR(tile->mem.kernel_bb_pool))
-		err = PTR_ERR(tile->mem.kernel_bb_pool);
+		return PTR_ERR(tile->mem.kernel_bb_pool);
 
 	xe_wa_apply_tile_workarounds(tile);
 
 	xe_tile_sysfs_init(tile);
 
-err_mem_access:
-	xe_device_mem_access_put(tile_to_xe(tile));
-	return err;
+	return 0;
 }
 
 void xe_tile_migrate_wait(struct xe_tile *tile)
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 7033f8c1b431..4feb35c95a1c 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -32,11 +32,8 @@ uc_to_xe(struct xe_uc *uc)
 /* Should be called once at driver load only */
 int xe_uc_init(struct xe_uc *uc)
 {
-	struct xe_device *xe = uc_to_xe(uc);
 	int ret;
 
-	xe_device_mem_access_get(xe);
-
 	/*
 	 * We call the GuC/HuC/GSC init functions even if GuC submission is off
 	 * to correctly move our tracking of the FW state to "disabled".
@@ -65,16 +62,8 @@ int xe_uc_init(struct xe_uc *uc)
 		goto err;
 
 	ret = xe_guc_db_mgr_init(&uc->guc.dbm, ~0);
-	if (ret)
-		goto err;
-
-	xe_device_mem_access_put(xe);
-
-	return 0;
 
 err:
-	xe_device_mem_access_put(xe);
-
 	return ret;
 }
 
-- 
2.43.2


^ permalink raw reply related	[flat|nested] 33+ messages in thread

end of thread, other threads:[~2024-03-06 20:15 UTC | newest]

Thread overview: 33+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-03-04 18:21 [PATCH 1/9] drm/xe: Remove useless mem_access during probe Rodrigo Vivi
2024-03-04 18:21 ` [PATCH 2/9] drm/xe: Convert xe_gem_fault to use direct xe_pm_runtime calls Rodrigo Vivi
2024-03-05 10:18   ` Matthew Auld
2024-03-05 11:29   ` Matthew Auld
2024-03-05 22:29     ` Rodrigo Vivi
2024-03-04 18:21 ` [PATCH 3/9] drm/xe: Move lockdep protection from mem_access to xe_pm_runtime Rodrigo Vivi
2024-03-05 10:20   ` Matthew Auld
2024-03-04 18:21 ` [PATCH 4/9] drm/xe: Move xe_irq runtime suspend and resume out of lockdep Rodrigo Vivi
2024-03-05 11:07   ` Matthew Auld
2024-03-05 22:45     ` Rodrigo Vivi
2024-03-06 16:04       ` Matthew Auld
2024-03-06 17:49         ` Rodrigo Vivi
2024-03-06 18:56           ` Matthew Auld
2024-03-06 20:04             ` Rodrigo Vivi
2024-03-04 18:21 ` [PATCH 5/9] drm/xe: Removing useless mem_access protection from runtime pm Rodrigo Vivi
2024-03-05 10:22   ` Matthew Auld
2024-03-04 18:21 ` [PATCH 6/9] drm/xe: Introduce xe_pm_runtime_get_noresume for inner callers Rodrigo Vivi
2024-03-05 10:29   ` Matthew Auld
2024-03-04 18:21 ` [PATCH 7/9] drm/xe: Convert mem_access_if_ongoing to direct xe_pm_runtime_get_if_active Rodrigo Vivi
2024-03-05 10:24   ` Matthew Auld
2024-03-04 18:21 ` [PATCH 8/9] drm/xe: Ensure all the inner access are using the _noresume variant Rodrigo Vivi
2024-03-05 11:14   ` Matthew Auld
2024-03-04 18:21 ` [PATCH 9/9] drm/xe: Kill xe_device_mem_access_{get*,put} Rodrigo Vivi
2024-03-05 11:18   ` Matthew Auld
2024-03-04 18:27 ` ✓ CI.Patch_applied: success for series starting with [1/9] drm/xe: Remove useless mem_access during probe Patchwork
2024-03-04 18:28 ` ✗ CI.checkpatch: warning " Patchwork
2024-03-04 18:28 ` ✓ CI.KUnit: success " Patchwork
2024-03-04 18:42 ` ✓ CI.Build: " Patchwork
2024-03-04 18:42 ` ✓ CI.Hooks: " Patchwork
2024-03-04 18:44 ` ✓ CI.checksparse: " Patchwork
2024-03-04 19:14 ` ✗ CI.BAT: failure " Patchwork
2024-03-05 10:17 ` [PATCH 1/9] " Matthew Auld
2024-03-06 20:15 ` ✗ CI.Patch_applied: failure for series starting with [1/9] drm/xe: Remove useless mem_access during probe (rev2) Patchwork

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).