From: Rodrigo Vivi <rodrigo.vivi@intel.com>
To: <intel-xe@lists.freedesktop.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>,
Matthew Auld <matthew.auld@intel.com>
Subject: [PATCH 05/14] drm/xe: Runtime PM wake on every sysfs call
Date: Thu, 22 Feb 2024 11:39:28 -0500
Message-ID: <20240222163937.138342-5-rodrigo.vivi@intel.com>
In-Reply-To: <20240222163937.138342-1-rodrigo.vivi@intel.com>
Let's ensure our PCI device is awakened on every sysfs call.
Let's increase the runtime PM protection and start moving it
towards the outer bounds (the sysfs entry points themselves).

For now, for the files with only a small number of attr
functions, let's call the runtime PM functions directly in
each of them. For the hw_engines entries, which expose many
files, let's instead add a sysfs_ops wrapper that takes the
runtime PM reference around every show/store.
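For illustration only, the direct pattern used by the small sysfs files
looks roughly like this (a simplified sketch; foo_show() and
read_some_value() are placeholders, not part of this patch, while
dev_to_xe() is the helper added in xe_gt_freq.c below):

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		struct xe_device *xe = dev_to_xe(dev);
		ssize_t ret;

		/* Wake the device before touching any HW-backed state */
		xe_pm_runtime_get(xe);
		ret = sysfs_emit(buf, "%u\n", read_some_value(xe));
		xe_pm_runtime_put(xe);

		return ret;
	}

The hw_engines case instead wraps every attribute with a sysfs_ops
show/store pair that takes the same get/put reference around
kattr->show() and kattr->store(), as in the diff below.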
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
drivers/gpu/drm/xe/xe_device_sysfs.c | 4 ++
drivers/gpu/drm/xe/xe_gt_freq.c | 38 +++++++++++-
drivers/gpu/drm/xe/xe_gt_idle.c | 23 +++++++-
drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c | 3 +
drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 58 ++++++++++++++++++-
drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h | 7 +++
drivers/gpu/drm/xe/xe_tile_sysfs.c | 1 +
7 files changed, 129 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 99113a5a2b84..e47c8ad1bb17 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -35,7 +35,9 @@ vram_d3cold_threshold_show(struct device *dev,
if (!xe)
return -EINVAL;
+ xe_pm_runtime_get(xe);
ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -58,7 +60,9 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
+ xe_pm_runtime_get(xe);
ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
+ xe_pm_runtime_put(xe);
return ret ?: count;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index e5b0f4ecdbe8..32b9a743629c 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -15,6 +15,7 @@
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle_sysfs.h"
#include "xe_guc_pc.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Frequency Management
@@ -49,12 +50,23 @@ dev_to_pc(struct device *dev)
return &kobj_to_gt(dev->kobj.parent)->uc.guc.pc;
}
+static struct xe_device *
+dev_to_xe(struct device *dev)
+{
+ return gt_to_xe(kobj_to_gt(dev->kobj.parent));
+}
+
static ssize_t act_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_act_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_act_freq(pc));
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(act_freq);
@@ -65,7 +77,9 @@ static ssize_t cur_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_cur_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -77,8 +91,13 @@ static ssize_t rp0_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rp0_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rp0_freq(pc));
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(rp0_freq);
@@ -86,8 +105,13 @@ static ssize_t rpe_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rpe_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
- return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpe_freq(pc));
+ return sysfs_emit(buf, "%d\n", freq);
}
static DEVICE_ATTR_RO(rpe_freq);
@@ -107,7 +131,9 @@ static ssize_t min_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_min_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -125,7 +151,9 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_set_min_freq(pc, freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -140,7 +168,9 @@ static ssize_t max_freq_show(struct device *dev,
u32 freq;
ssize_t ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_get_max_freq(pc, &freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
@@ -158,7 +188,9 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ xe_pm_runtime_get(dev_to_xe(dev));
ret = xe_guc_pc_set_max_freq(pc, freq);
+ xe_pm_runtime_put(dev_to_xe(dev));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9fcae65b6469..2984680de3f9 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -12,6 +12,7 @@
#include "xe_guc_pc.h"
#include "regs/xe_gt_regs.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Idle
@@ -40,6 +41,15 @@ static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
return >idle_to_gt(gtidle)->uc.guc.pc;
}
+static struct xe_device *
+pc_to_xe(struct xe_guc_pc *pc)
+{
+ struct xe_guc *guc = container_of(pc, struct xe_guc, pc);
+ struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
+
+ return gt_to_xe(gt);
+}
+
static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
{
switch (state) {
@@ -86,8 +96,14 @@ static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ ssize_t ret;
+
+ xe_pm_runtime_get(pc_to_xe(pc));
+ ret = sysfs_emit(buff, "%s\n", gtidle->name);
+ xe_pm_runtime_put(pc_to_xe(pc));
- return sysfs_emit(buff, "%s\n", gtidle->name);
+ return ret;
}
static DEVICE_ATTR_RO(name);
@@ -98,7 +114,9 @@ static ssize_t idle_status_show(struct device *dev,
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
+ xe_pm_runtime_get(pc_to_xe(pc));
state = gtidle->idle_status(pc);
+ xe_pm_runtime_put(pc_to_xe(pc));
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
@@ -111,7 +129,10 @@ static ssize_t idle_residency_ms_show(struct device *dev,
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
+ xe_pm_runtime_get(pc_to_xe(pc));
residency = gtidle->idle_residency(pc);
+ xe_pm_runtime_put(pc_to_xe(pc));
+
return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
}
static DEVICE_ATTR_RO(idle_residency_ms);
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
index 63d640591a52..9c33045ff1ef 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
@@ -11,6 +11,7 @@
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle_sysfs.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
/**
* DOC: Xe GT Throttle
@@ -38,10 +39,12 @@ static u32 read_perf_limit_reasons(struct xe_gt *gt)
{
u32 reg;
+ xe_pm_runtime_get(gt_to_xe(gt));
if (xe_gt_is_media_type(gt))
reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
else
reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);
+ xe_pm_runtime_put(gt_to_xe(gt));
return reg;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index 2345fb42fa39..9e23ca7f45ad 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -9,6 +9,7 @@
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
+#include "xe_pm.h"
#define MAX_ENGINE_CLASS_NAME_LEN 16
static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
@@ -513,6 +514,7 @@ kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, char *name
kobject_put(&keclass->base);
return NULL;
}
+ keclass->xe = xe;
err = drmm_add_action_or_reset(&xe->drm, kobj_xe_hw_engine_class_fini,
&keclass->base);
@@ -567,9 +569,63 @@ static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
kfree(kobj);
}
+#include "xe_pm.h"
+
+static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+static inline struct xe_device *to_xe_device(const struct drm_device *dev)
+{
+ return container_of(dev, struct xe_device, drm);
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->show(kobj, kattr, buf);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->store(kobj, kattr, buf, count);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+ .show = xe_hw_engine_class_sysfs_attr_show,
+ .store = xe_hw_engine_class_sysfs_attr_store,
+};
+
static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
.release = xe_hw_engine_sysfs_kobj_release,
- .sysfs_ops = &kobj_sysfs_ops,
+ .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
};
static void hw_engine_class_sysfs_fini(struct drm_device *drm, void *arg)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
index ec5ba673b314..28a0d7c909c0 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
@@ -26,6 +26,8 @@ struct kobj_eclass {
struct kobject base;
/** @eclass: A pointer to the hw engine class interface */
struct xe_hw_engine_class_intf *eclass;
+ /** @xe: A pointer to the xe device */
+ struct xe_device *xe;
};
static inline struct xe_hw_engine_class_intf *kobj_to_eclass(struct kobject *kobj)
@@ -33,4 +35,9 @@ static inline struct xe_hw_engine_class_intf *kobj_to_eclass(struct kobject *kob
return container_of(kobj, struct kobj_eclass, base)->eclass;
}
+static inline struct xe_device *kobj_to_xe(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_eclass, base)->xe;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index 0662968d7bcb..237a0761d3ad 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include <drm/drm_managed.h>
+#include "xe_pm.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_vram_freq.h"
--
2.43.2