intel-xe.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
To: Matthew Auld <matthew.auld@intel.com>
Cc: <intel-xe@lists.freedesktop.org>
Subject: Re: [RFC 14/34] drm/xe: Runtime PM wake on every debugfs call
Date: Wed, 14 Feb 2024 13:57:05 -0500	[thread overview]
Message-ID: <Zc0NAUbVw2qlsjId@intel.com> (raw)
In-Reply-To: <6303efb2-d36f-4666-803d-abe55ced98b4@intel.com>

On Mon, Feb 05, 2024 at 11:10:19AM +0000, Matthew Auld wrote:
> On 26/01/2024 20:30, Rodrigo Vivi wrote:
> > Let's ensure our PCI device is awake on every debugfs call.
> > Let's increase the runtime_pm protection and start moving
> > that to the outer bounds.
> > 
> > Also remove the mem_access get_put helpers, now that they are not
> > needed anymore.
> 
> Wrong commit?

hmm... probably a badly worded sentence.

what about:

"Also, let's remove the mem_access_{get,put} from where they are not
needed anymore."


> 
> > 
> > Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> 
> Otherwise,
> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
> 
> > ---
> >   drivers/gpu/drm/xe/xe_debugfs.c     | 10 +++---
> >   drivers/gpu/drm/xe/xe_gt_debugfs.c  | 53 ++++++++++++++++++++++++++---
> >   drivers/gpu/drm/xe/xe_guc_debugfs.c |  9 ++---
> >   drivers/gpu/drm/xe/xe_huc_debugfs.c |  5 +--
> >   drivers/gpu/drm/xe/xe_ttm_sys_mgr.c |  5 ++-
> >   5 files changed, 66 insertions(+), 16 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
> > index 01db5b27bec5..8abdf3c17e1d 100644
> > --- a/drivers/gpu/drm/xe/xe_debugfs.c
> > +++ b/drivers/gpu/drm/xe/xe_debugfs.c
> > @@ -12,6 +12,7 @@
> >   #include "xe_bo.h"
> >   #include "xe_device.h"
> >   #include "xe_gt_debugfs.h"
> > +#include "xe_pm.h"
> >   #include "xe_step.h"
> >   #ifdef CONFIG_DRM_XE_DEBUG
> > @@ -37,6 +38,8 @@ static int info(struct seq_file *m, void *data)
> >   	struct xe_gt *gt;
> >   	u8 id;
> > +	xe_pm_runtime_get(xe);
> > +
> >   	drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100);
> >   	drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100);
> >   	drm_printf(&p, "stepping G:%s M:%s D:%s B:%s\n",
> > @@ -63,6 +66,7 @@ static int info(struct seq_file *m, void *data)
> >   			   gt->info.engine_mask);
> >   	}
> > +	xe_pm_runtime_put(xe);
> >   	return 0;
> >   }
> > @@ -76,8 +80,7 @@ static int forcewake_open(struct inode *inode, struct file *file)
> >   	struct xe_gt *gt;
> >   	u8 id;
> > -	xe_device_mem_access_get(xe);
> > -
> > +	xe_pm_runtime_get(xe);
> >   	for_each_gt(gt, xe, id)
> >   		XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
> > @@ -92,8 +95,7 @@ static int forcewake_release(struct inode *inode, struct file *file)
> >   	for_each_gt(gt, xe, id)
> >   		XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
> > -
> > -	xe_device_mem_access_put(xe);
> > +	xe_pm_runtime_put(xe);
> >   	return 0;
> >   }
> > diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
> > index c4b67cf09f8f..6b4dc2927727 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
> > @@ -18,6 +18,7 @@
> >   #include "xe_lrc.h"
> >   #include "xe_macros.h"
> >   #include "xe_pat.h"
> > +#include "xe_pm.h"
> >   #include "xe_reg_sr.h"
> >   #include "xe_reg_whitelist.h"
> >   #include "xe_uc_debugfs.h"
> > @@ -37,10 +38,10 @@ static int hw_engines(struct seq_file *m, void *data)
> >   	enum xe_hw_engine_id id;
> >   	int err;
> > -	xe_device_mem_access_get(xe);
> > +	xe_pm_runtime_get(xe);
> >   	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
> >   	if (err) {
> > -		xe_device_mem_access_put(xe);
> > +		xe_pm_runtime_put(xe);
> >   		return err;
> >   	}
> > @@ -48,7 +49,7 @@ static int hw_engines(struct seq_file *m, void *data)
> >   		xe_hw_engine_print(hwe, &p);
> >   	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
> > -	xe_device_mem_access_put(xe);
> > +	xe_pm_runtime_put(xe);
> >   	if (err)
> >   		return err;
> > @@ -59,18 +60,23 @@ static int force_reset(struct seq_file *m, void *data)
> >   {
> >   	struct xe_gt *gt = node_to_gt(m->private);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_gt_reset_async(gt);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> >   	return 0;
> >   }
> >   static int sa_info(struct seq_file *m, void *data)
> >   {
> > -	struct xe_tile *tile = gt_to_tile(node_to_gt(m->private));
> > +	struct xe_gt *gt = node_to_gt(m->private);
> > +	struct xe_tile *tile = gt_to_tile(gt);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, &p,
> >   				     tile->mem.kernel_bb_pool->gpu_addr);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> >   	return 0;
> >   }
> > @@ -80,7 +86,9 @@ static int topology(struct seq_file *m, void *data)
> >   	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_gt_topology_dump(gt, &p);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> >   	return 0;
> >   }
> > @@ -90,7 +98,9 @@ static int steering(struct seq_file *m, void *data)
> >   	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_gt_mcr_steering_dump(gt, &p);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> >   	return 0;
> >   }
> > @@ -99,8 +109,13 @@ static int ggtt(struct seq_file *m, void *data)
> >   {
> >   	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	int ret;
> > +
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> > +	ret = xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, &p);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> > -	return xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, &p);
> > +	return ret;
> >   }
> >   static int register_save_restore(struct seq_file *m, void *data)
> > @@ -110,6 +125,8 @@ static int register_save_restore(struct seq_file *m, void *data)
> >   	struct xe_hw_engine *hwe;
> >   	enum xe_hw_engine_id id;
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> > +
> >   	xe_reg_sr_dump(&gt->reg_sr, &p);
> >   	drm_printf(&p, "\n");
> > @@ -127,6 +144,8 @@ static int register_save_restore(struct seq_file *m, void *data)
> >   	for_each_hw_engine(hwe, gt, id)
> >   		xe_reg_whitelist_dump(&hwe->reg_whitelist, &p);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> > +
> >   	return 0;
> >   }
> > @@ -135,7 +154,9 @@ static int workarounds(struct seq_file *m, void *data)
> >   	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_wa_dump(gt, &p);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> >   	return 0;
> >   }
> > @@ -145,48 +166,70 @@ static int pat(struct seq_file *m, void *data)
> >   	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_pat_dump(gt, &p);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> >   	return 0;
> >   }
> >   static int rcs_default_lrc(struct seq_file *m, void *data)
> >   {
> > +	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_RENDER);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> > +
> >   	return 0;
> >   }
> >   static int ccs_default_lrc(struct seq_file *m, void *data)
> >   {
> > +	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COMPUTE);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> > +
> >   	return 0;
> >   }
> >   static int bcs_default_lrc(struct seq_file *m, void *data)
> >   {
> > +	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COPY);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> > +
> >   	return 0;
> >   }
> >   static int vcs_default_lrc(struct seq_file *m, void *data)
> >   {
> > +	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_DECODE);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> > +
> >   	return 0;
> >   }
> >   static int vecs_default_lrc(struct seq_file *m, void *data)
> >   {
> > +	struct xe_gt *gt = node_to_gt(m->private);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > +	xe_pm_runtime_get(gt_to_xe(gt));
> >   	xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_ENHANCE);
> > +	xe_pm_runtime_put(gt_to_xe(gt));
> > +
> >   	return 0;
> >   }
> > diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
> > index ffd7d53bcc42..d3822cbea273 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
> > @@ -14,6 +14,7 @@
> >   #include "xe_guc_ct.h"
> >   #include "xe_guc_log.h"
> >   #include "xe_macros.h"
> > +#include "xe_pm.h"
> >   static struct xe_guc *node_to_guc(struct drm_info_node *node)
> >   {
> > @@ -26,9 +27,9 @@ static int guc_info(struct seq_file *m, void *data)
> >   	struct xe_device *xe = guc_to_xe(guc);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > -	xe_device_mem_access_get(xe);
> > +	xe_pm_runtime_get(xe);
> >   	xe_guc_print_info(guc, &p);
> > -	xe_device_mem_access_put(xe);
> > +	xe_pm_runtime_put(xe);
> >   	return 0;
> >   }
> > @@ -39,9 +40,9 @@ static int guc_log(struct seq_file *m, void *data)
> >   	struct xe_device *xe = guc_to_xe(guc);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > -	xe_device_mem_access_get(xe);
> > +	xe_pm_runtime_get(xe);
> >   	xe_guc_log_print(&guc->log, &p);
> > -	xe_device_mem_access_put(xe);
> > +	xe_pm_runtime_put(xe);
> >   	return 0;
> >   }
> > diff --git a/drivers/gpu/drm/xe/xe_huc_debugfs.c b/drivers/gpu/drm/xe/xe_huc_debugfs.c
> > index 18585a7eeb9d..3a888a40188b 100644
> > --- a/drivers/gpu/drm/xe/xe_huc_debugfs.c
> > +++ b/drivers/gpu/drm/xe/xe_huc_debugfs.c
> > @@ -12,6 +12,7 @@
> >   #include "xe_gt.h"
> >   #include "xe_huc.h"
> >   #include "xe_macros.h"
> > +#include "xe_pm.h"
> >   static struct xe_gt *
> >   huc_to_gt(struct xe_huc *huc)
> > @@ -36,9 +37,9 @@ static int huc_info(struct seq_file *m, void *data)
> >   	struct xe_device *xe = huc_to_xe(huc);
> >   	struct drm_printer p = drm_seq_file_printer(m);
> > -	xe_device_mem_access_get(xe);
> > +	xe_pm_runtime_get(xe);
> >   	xe_huc_print_info(huc, &p);
> > -	xe_device_mem_access_put(xe);
> > +	xe_pm_runtime_put(xe);
> >   	return 0;
> >   }
> > diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
> > index 3e1fa0c832ca..9844a8edbfe1 100644
> > --- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
> > +++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
> > @@ -73,7 +73,10 @@ static void xe_ttm_sys_mgr_del(struct ttm_resource_manager *man,
> >   static void xe_ttm_sys_mgr_debug(struct ttm_resource_manager *man,
> >   				 struct drm_printer *printer)
> >   {
> > -
> > +	/*
> > +	 * This function is called by debugfs entry and would require
> > +	 * pm_runtime_{get,put} wrappers around any operation.
> > +	 */
> >   }
> >   static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = {

  reply	other threads:[~2024-02-14 18:57 UTC|newest]

Thread overview: 77+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-01-26 20:30 [RFC 00/34] Kill mem_access v2 Rodrigo Vivi
2024-01-26 20:30 ` [RFC 01/34] Revert "drm/xe/uc: Store firmware binary in system-memory backed BO" Rodrigo Vivi
2024-01-26 20:30 ` [RFC 02/34] drm/xe: Document Xe PM component Rodrigo Vivi
2024-01-29 10:38   ` Francois Dugast
2024-01-26 20:30 ` [RFC 03/34] drm/xe: Fix display runtime_pm handling Rodrigo Vivi
2024-02-05  9:11   ` Matthew Auld
2024-02-14 18:05     ` Rodrigo Vivi
2024-02-15  9:30       ` Matthew Auld
2024-02-15 22:19         ` Rodrigo Vivi
2024-01-26 20:30 ` [RFC 04/34] drm/xe: Create a xe_pm_runtime_resume_and_get variant for display Rodrigo Vivi
2024-01-26 20:30 ` [RFC 05/34] drm/xe: Convert xe_pm_runtime_{get, put} to void and protect from recursion Rodrigo Vivi
2024-01-26 20:30 ` [RFC 06/34] drm/xe: Prepare display for D3Cold Rodrigo Vivi
2024-01-26 20:30 ` [RFC 07/34] drm/xe: Convert mem_access assertion towards the runtime_pm state Rodrigo Vivi
2024-02-05  9:55   ` Matthew Auld
2024-02-14 18:15     ` Rodrigo Vivi
2024-01-26 20:30 ` [RFC 08/34] drm/xe: Runtime PM wake on every IOCTL Rodrigo Vivi
2024-02-05  9:39   ` Matthew Auld
2024-01-26 20:30 ` [RFC 09/34] drm/xe: Convert kunit tests from mem_access to xe_pm_runtime Rodrigo Vivi
2024-02-05  9:57   ` Matthew Auld
2024-01-26 20:30 ` [RFC 10/34] drm/xe: Convert scheduler towards direct pm_runtime Rodrigo Vivi
2024-02-05 10:46   ` Matthew Auld
2024-01-26 20:30 ` [RFC 11/34] drm/xe: Runtime PM wake on every sysfs call Rodrigo Vivi
2024-02-05 10:55   ` Matthew Auld
2024-02-14 18:48     ` Rodrigo Vivi
2024-01-26 20:30 ` [RFC 12/34] drm/xe: Ensure device is awake before removing it Rodrigo Vivi
2024-02-05 11:05   ` Matthew Auld
2024-02-14 18:51     ` Rodrigo Vivi
2024-01-26 20:30 ` [RFC 13/34] drm/xe: Remove mem_access from guc_pc calls Rodrigo Vivi
2024-02-05 11:08   ` Matthew Auld
2024-01-26 20:30 ` [RFC 14/34] drm/xe: Runtime PM wake on every debugfs call Rodrigo Vivi
2024-02-05 11:10   ` Matthew Auld
2024-02-14 18:57     ` Rodrigo Vivi [this message]
2024-01-26 20:30 ` [RFC 15/34] drm/xe: Replace dma_buf mem_access per direct xe_pm_runtime calls Rodrigo Vivi
2024-02-05 11:15   ` Matthew Auld
2024-01-26 20:30 ` [RFC 16/34] drm/xe: Removing extra mem_access protection from runtime pm Rodrigo Vivi
2024-02-05 11:23   ` Matthew Auld
2024-01-26 20:30 ` [RFC 17/34] drm/xe: Convert hwmon from mem_access to xe_pm_runtime calls Rodrigo Vivi
2024-02-05 11:25   ` Matthew Auld
2024-01-26 20:30 ` [RFC 18/34] drm/xe: Move lockdep protection from mem_access to xe_pm_runtime Rodrigo Vivi
2024-02-05 11:31   ` Matthew Auld
2024-01-26 20:30 ` [RFC 19/34] drm/xe: Remove pm_runtime lockdep Rodrigo Vivi
2024-02-05 11:54   ` Matthew Auld
2024-02-15 22:47     ` Rodrigo Vivi
2024-02-20 17:48       ` Matthew Auld
2024-02-28 16:53         ` Rodrigo Vivi
2024-01-26 20:30 ` [RFC 20/34] drm/xe: Stop checking for power_lost on D3Cold Rodrigo Vivi
2024-01-26 20:30 ` [RFC 21/34] drm/xe: Convert GuC CT paths from mem_access to xe_pm_runtime Rodrigo Vivi
2024-02-05 12:23   ` Matthew Auld
2024-02-28 16:51     ` Rodrigo Vivi
2024-01-26 20:30 ` [RFC 22/34] drm/xe: Keep D0 for the entire duration of a LR VM Rodrigo Vivi
2024-01-26 20:30 ` [RFC 23/34] drm/xe: Ensure D0 on TLB invalidation Rodrigo Vivi
2024-02-05 12:41   ` Matthew Auld
2024-01-26 20:30 ` [RFC 24/34] drm/xe: Remove useless mem_access protection for query ioctls Rodrigo Vivi
2024-02-05 12:43   ` Matthew Auld
2024-01-26 20:30 ` [RFC 25/34] drm/xe: Convert gsc_work from mem_access to xe_pm_runtime Rodrigo Vivi
2024-02-05 13:11   ` Matthew Auld
2024-01-26 20:30 ` [RFC 26/34] drm/xe: VMs don't need the mem_access protection anymore Rodrigo Vivi
2024-02-05 13:29   ` Matthew Auld
2024-02-15 22:37     ` Rodrigo Vivi
2024-01-26 20:30 ` [RFC 27/34] drm/xe: Remove useless mem_access during probe Rodrigo Vivi
2024-02-05 13:18   ` Matthew Auld
2024-01-26 20:30 ` [RFC 28/34] drm/xe: Remove mem_access from suspend and resume functions Rodrigo Vivi
2024-02-05 13:30   ` Matthew Auld
2024-01-26 20:30 ` [RFC 29/34] drm/xe: Convert gt_reset from mem_access to xe_pm_runtime Rodrigo Vivi
2024-02-05 13:33   ` Matthew Auld
2024-01-26 20:30 ` [RFC 30/34] drm/xe: Remove useless mem_access on PAT dumps Rodrigo Vivi
2024-02-05 13:34   ` Matthew Auld
2024-01-26 20:30 ` [RFC 31/34] drm/xe: Remove inner mem_access protections Rodrigo Vivi
2024-01-26 20:30 ` [RFC 32/34] drm/xe: Kill xe_device_mem_access_{get*,put} Rodrigo Vivi
2024-01-26 20:30 ` [RFC 33/34] drm/xe: Remove unused runtime pm helper Rodrigo Vivi
2024-01-26 20:30 ` [RFC 34/34] drm/xe: Enable D3Cold on 'low' VRAM utilization Rodrigo Vivi
2024-01-29 12:12   ` Matthew Auld
2024-01-29 19:01     ` Vivi, Rodrigo
2024-01-30 15:01       ` Gupta, Anshuman
2024-01-26 20:39 ` ✓ CI.Patch_applied: success for Kill mem_access v2 Patchwork
2024-01-26 20:40 ` ✗ CI.checkpatch: warning " Patchwork
2024-01-26 20:40 ` ✗ CI.KUnit: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=Zc0NAUbVw2qlsjId@intel.com \
    --to=rodrigo.vivi@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.auld@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).