AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Alex Deucher <alexander.deucher@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Yang Wang <kevinyang.wang@amd.com>,
	Asad Kamal <asad.kamal@amd.com>,
	"Lijo Lazar" <lijo.lazar@amd.com>,
	Alex Deucher <alexander.deucher@amd.com>
Subject: [PATCH 11/25] drm/amd/pm: add get_gpu_metrics support for 15.0.8
Date: Tue, 17 Mar 2026 16:12:27 -0400	[thread overview]
Message-ID: <20260317201242.3808136-11-alexander.deucher@amd.com> (raw)
In-Reply-To: <20260317201242.3808136-1-alexander.deucher@amd.com>

From: Yang Wang <kevinyang.wang@amd.com>

export .get_gpu_metrics interface for 15.0.8

v2: Remove members already exposed by other interfaces, use mask,
logical conversion (Lijo)

v3: Use correct logic for hbm stacks loop (Lijo)
Remove buffer allocation

v4: Make out of bound check outside loop (Lijo)

Signed-off-by: Yang Wang <kevinyang.wang@amd.com>
Signed-off-by: Asad Kamal <asad.kamal@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    |   1 +
 .../drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c  | 174 +++++++++++++++++-
 .../drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h  | 126 +++++++++++++
 3 files changed, 300 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index a9b73f4fd4661..22ee30f514c0a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -585,6 +585,7 @@ enum amdgpu_metrics_attr_id {
 	AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
 	AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
 	AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HBM,
+	AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MID,
 	AMDGPU_METRICS_ATTR_ID_TEMPERATURE_AID,
 	AMDGPU_METRICS_ATTR_ID_TEMPERATURE_XCD,
 	AMDGPU_METRICS_ATTR_ID_MAX,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c
index 60a43ce5648a5..c13804c32e706 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.c
@@ -56,6 +56,13 @@
 #define SMUQ10_FRAC(x) ((x) & 0x3ff)
 #define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
 
+/* A stack is valid only when all four UMC channels in its nibble are active */
+#define hbm_stack_mask_valid(umc_mask) \
+	(((umc_mask) & 0xF) == 0xF)
+
+/*
+ * Walk HBM stacks, consuming one 4-bit UMC-channel group per iteration.
+ * NB: destructively shifts @umc_mask; pass a scratch copy.
+ */
+#define for_each_hbm_stack(stack_idx, umc_mask) \
+	for ((stack_idx) = 0; (umc_mask); \
+	     (umc_mask) >>= 4, (stack_idx)++)
+
 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
 
 #define SMU_15_0_8_FEA_MAP(smu_feature, smu_15_0_8_feature)                    \
@@ -167,8 +174,9 @@ static const struct cmn2asic_mapping smu_v15_0_8_table_map[SMU_TABLE_COUNT] = {
 static int smu_v15_0_8_tables_init(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
+	int ret, gpu_metrcs_size = sizeof(MetricsTable_t);
 	struct smu_table *tables = smu_table->tables;
-	int gpu_metrcs_size = sizeof(MetricsTable_t);
+	struct smu_v15_0_8_gpu_metrics *gpu_metrics;
 	void *driver_pptable __free(kfree) = NULL;
 	void *metrics_table __free(kfree) = NULL;
 
@@ -190,6 +198,15 @@ static int smu_v15_0_8_tables_init(struct smu_context *smu)
 	if (!driver_pptable)
 		return -ENOMEM;
 
+	ret = smu_driver_table_init(smu, SMU_DRIVER_TABLE_GPU_METRICS,
+				    sizeof(struct smu_v15_0_8_gpu_metrics),
+				    SMU_GPU_METRICS_CACHE_INTERVAL);
+	if (ret)
+		return ret;
+
+	gpu_metrics = (struct smu_v15_0_8_gpu_metrics *)smu_driver_table_ptr(smu,
+		       SMU_DRIVER_TABLE_GPU_METRICS);
+	smu_v15_0_8_gpu_metrics_init(gpu_metrics, 1, 9);
 	smu_table->metrics_table = no_free_ptr(metrics_table);
 	smu_table->driver_pptable = no_free_ptr(driver_pptable);
 
@@ -967,6 +984,160 @@ static int smu_v15_0_8_mode2_reset(struct smu_context *smu)
 	return ret;
 }
 
+/*
+ * smu_v15_0_8_get_gpu_metrics - fill the cached gpu_metrics table from the
+ * firmware metrics table and hand it back to the caller.
+ *
+ * Returns the size of the metrics structure on success, negative errno on
+ * failure to refresh the firmware table. @*table points at driver-owned
+ * cached storage; the caller must not free it.
+ */
+static ssize_t smu_v15_0_8_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_v15_0_8_gpu_metrics *gpu_metrics;
+	struct amdgpu_device *adev = smu->adev;
+	int ret = 0, xcc_id, inst, i, j, idx;
+	uint32_t aid_mask = adev->aid_mask;
+	/* NOTE(review): MID instances assumed to mirror the AID mask — confirm */
+	uint32_t mid_mask = adev->aid_mask;
+	MetricsTable_t *metrics;
+
+	/*
+	 * Refresh the cached firmware metrics; the data lands in
+	 * smu_table->metrics_table, so no temporary buffer is needed (a
+	 * kzalloc here would leak as soon as 'metrics' is repointed below).
+	 */
+	ret = smu_v15_0_8_get_metrics_table_internal(smu, 1, NULL);
+	if (ret)
+		return ret;
+
+	metrics = (MetricsTable_t *)smu_table->metrics_table;
+	gpu_metrics = (struct smu_v15_0_8_gpu_metrics *)smu_driver_table_ptr(smu,
+		       SMU_DRIVER_TABLE_GPU_METRICS);
+
+	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+	gpu_metrics->temperature_hotspot = SMUQ10_ROUND(metrics->MaxSocketTemperature);
+
+	/* Per-HBM stack temperatures */
+	if (adev->umc.active_mask) {
+		u64 mask = adev->umc.active_mask;
+		int out_idx = 0;
+		int stack_idx;
+
+		/* 4 UMC channels per stack; bound-check once, outside the loop */
+		if (unlikely(hweight64(mask) / 4 > SMU_15_0_8_MAX_HBM_STACKS)) {
+			dev_warn(adev->dev, "Invalid umc mask 0x%llx\n", mask);
+		} else {
+			for_each_hbm_stack(stack_idx, mask) {
+				/* Skip stacks without all four channels active */
+				if (!hbm_stack_mask_valid(mask))
+					continue;
+				gpu_metrics->temperature_hbm[out_idx++] =
+					SMUQ10_ROUND(metrics->HbmTemperature[stack_idx]);
+			}
+		}
+	}
+
+	/* Reports max temperature of all voltage rails */
+	gpu_metrics->temperature_vrsoc = SMUQ10_ROUND(metrics->MaxVrTemperature);
+	/* MID, AID, XCD temperatures */
+	idx = 0;
+	for_each_inst(i, mid_mask) {
+		gpu_metrics->temperature_mid[idx] = SMUQ10_ROUND(metrics->MidTemperature[i]);
+		idx++;
+	}
+
+	idx = 0;
+	for_each_inst(i, aid_mask) {
+		gpu_metrics->temperature_aid[idx] = SMUQ10_ROUND(metrics->AidTemperature[i]);
+		idx++;
+	}
+
+	for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+		xcc_id = GET_INST(GC, i);
+		if (xcc_id >= 0)
+			gpu_metrics->temperature_xcd[i] = SMUQ10_ROUND(metrics->XcdTemperature[xcc_id]);
+	}
+	/* Power */
+	gpu_metrics->curr_socket_power = SMUQ10_ROUND(metrics->SocketPower);
+
+	gpu_metrics->average_gfx_activity = SMUQ10_ROUND(metrics->SocketGfxBusy);
+	gpu_metrics->average_umc_activity = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+	gpu_metrics->mem_max_bandwidth = SMUQ10_ROUND(metrics->MaxDramBandwidth);
+
+	/* Energy counter reported in 15.259uJ (2^-16) units */
+	gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+	/* Per-XCC gfx clocks, indexed by logical XCC, sourced by physical id */
+	for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+		xcc_id = GET_INST(GC, i);
+		if (xcc_id >= 0) {
+			gpu_metrics->current_gfxclk[i] =
+				SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+		}
+	}
+
+	/* Per-MID clocks */
+	idx = 0;
+	for_each_inst(i, mid_mask) {
+		gpu_metrics->current_socclk[idx] = SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+		idx++;
+	}
+
+	/* Per-VCN clocks */
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		inst = GET_INST(VCN, i);
+		if (inst >= 0) {
+			gpu_metrics->current_vclk0[i] = SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+			gpu_metrics->current_dclk0[i] = SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+		}
+	}
+
+	/* Per-AID clocks */
+	idx = 0;
+	for_each_inst(i, aid_mask) {
+		gpu_metrics->current_uclk[idx] = SMUQ10_ROUND(metrics->UclkFrequency[i]);
+		idx++;
+	}
+
+	/* Total accumulated cycle counter */
+	gpu_metrics->accumulation_counter = metrics->AccumulationCounter;
+
+	/* Accumulated throttler residencies */
+	gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc;
+	gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc;
+	gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc;
+	gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc;
+	gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc;
+
+	gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+	gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+
+	/* XGMI link status, remapped through the extended-link table */
+	for (i = 0; i < NUM_XGMI_LINKS; i++) {
+		j = amdgpu_xgmi_get_ext_link(adev, i);
+		if (j < 0 || j >= NUM_XGMI_LINKS)
+			continue;
+		ret = amdgpu_get_xgmi_link_status(adev, i);
+		if (ret >= 0)
+			gpu_metrics->xgmi_link_status[j] = ret;
+	}
+
+	gpu_metrics->xgmi_read_data_acc = SMUQ10_ROUND(metrics->XgmiReadBandwidthAcc);
+	gpu_metrics->xgmi_write_data_acc = SMUQ10_ROUND(metrics->XgmiWriteBandwidthAcc);
+
+	/* Per-XCC busy/throttle accumulators */
+	for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+		inst = GET_INST(GC, i);
+		gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+		gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+		gpu_metrics->gfx_below_host_limit_ppt_acc[i] =
+			SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+		gpu_metrics->gfx_below_host_limit_thm_acc[i] =
+			SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+		gpu_metrics->gfx_low_utilization_acc[i] =
+			SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+		gpu_metrics->gfx_below_host_limit_total_acc[i] =
+			SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+	}
+
+	gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
+	gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate;
+
+	gpu_metrics->firmware_timestamp = metrics->Timestamp;
+
+	*table = gpu_metrics;
+
+	smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_GPU_METRICS);
+
+	return sizeof(*gpu_metrics);
+}
+
 static const struct pptable_funcs smu_v15_0_8_ppt_funcs = {
 	.init_allowed_features = smu_v15_0_8_init_allowed_features,
 	.set_default_dpm_table = smu_v15_0_8_set_default_dpm_table,
@@ -990,6 +1161,7 @@ static const struct pptable_funcs smu_v15_0_8_ppt_funcs = {
 	.get_pm_metrics = smu_v15_0_8_get_pm_metrics,
 	.mode2_reset = smu_v15_0_8_mode2_reset,
 	.get_dpm_ultimate_freq = smu_v15_0_8_get_dpm_ultimate_freq,
+	.get_gpu_metrics = smu_v15_0_8_get_gpu_metrics,
 };
 
 static void smu_v15_0_8_init_msg_ctl(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h
index 6c85f23d31116..8fc16796788b5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_8_ppt.h
@@ -23,6 +23,15 @@
 #ifndef __SMU_15_0_8_PPT_H__
 #define __SMU_15_0_8_PPT_H__
 
+/* Array-size limits for the SMU v15.0.8 gpu_metrics fields declared below */
+#define SMU_15_0_8_NUM_XGMI_LINKS 8
+#define SMU_15_0_8_MAX_GFX_CLKS 8
+#define SMU_15_0_8_MAX_CLKS 4
+#define SMU_15_0_8_MAX_XCC 8
+#define SMU_15_0_8_MAX_VCN 4
+#define SMU_15_0_8_MAX_JPEG 40
+#define SMU_15_0_8_MAX_AID 2
+#define SMU_15_0_8_MAX_MID 2
+#define SMU_15_0_8_MAX_HBM_STACKS 12
 extern void smu_v15_0_8_set_ppt_funcs(struct smu_context *smu);
 
 typedef struct {
@@ -55,4 +64,121 @@ typedef struct {
 	bool init;
 } PPTable_t;
 
+#if defined(SWSMU_CODE_LAYER_L2)
+#include "smu_cmn.h"
+
+/*
+ * SMU v15.0.8 GPU metrics.
+ *
+ * X-macro field list: each entry declares one metric via SMU_SCALAR() or
+ * SMU_ARRAY() as (attribute id, unit, storage type, field name[, count]).
+ * DECLARE_SMU_METRICS_CLASS() below expands this list into the
+ * struct smu_v15_0_8_gpu_metrics definition.
+ */
+#define SMU_15_0_8_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY)                       \
+	SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1),          \
+		   SMU_MTYPE(U16), temperature_hotspot);                       \
+	SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1),              \
+		   SMU_MTYPE(U16), temperature_mem);                           \
+	SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1),            \
+		   SMU_MTYPE(U16), temperature_vrsoc);                         \
+	SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1),               \
+		  SMU_MTYPE(U16), temperature_hbm,                             \
+		  SMU_15_0_8_MAX_HBM_STACKS);                                  \
+	SMU_ARRAY(SMU_MATTR(TEMPERATURE_MID), SMU_MUNIT(TEMP_1),               \
+		  SMU_MTYPE(U16), temperature_mid, SMU_15_0_8_MAX_MID);        \
+	SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1),               \
+		  SMU_MTYPE(U16), temperature_aid, SMU_15_0_8_MAX_AID);        \
+	SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1),               \
+		  SMU_MTYPE(U16), temperature_xcd, SMU_15_0_8_MAX_XCC);        \
+	SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1),           \
+		   SMU_MTYPE(U16), curr_socket_power);                         \
+	SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT),        \
+		   SMU_MTYPE(U16), average_gfx_activity);                      \
+	SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT),        \
+		   SMU_MTYPE(U16), average_umc_activity);                      \
+	SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1),              \
+		   SMU_MTYPE(U64), mem_max_bandwidth);                         \
+	SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE),             \
+		   SMU_MTYPE(U64), energy_accumulator);                        \
+	SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1),         \
+		   SMU_MTYPE(U64), system_clock_counter);                      \
+	SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE),           \
+		   SMU_MTYPE(U64), accumulation_counter);                      \
+	SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE),          \
+		   SMU_MTYPE(U64), prochot_residency_acc);                     \
+	SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE),              \
+		   SMU_MTYPE(U64), ppt_residency_acc);                         \
+	SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE),       \
+		   SMU_MTYPE(U64), socket_thm_residency_acc);                  \
+	SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE),           \
+		   SMU_MTYPE(U64), vr_thm_residency_acc);                      \
+	SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE),          \
+		   SMU_MTYPE(U64), hbm_thm_residency_acc);                     \
+	SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE),             \
+		   SMU_MTYPE(U32), gfxclk_lock_status);                        \
+	SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE),                \
+		   SMU_MTYPE(U16), pcie_link_width);                           \
+	SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2),             \
+		   SMU_MTYPE(U16), pcie_link_speed);                           \
+	SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE),                \
+		   SMU_MTYPE(U16), xgmi_link_width);                           \
+	SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1),             \
+		   SMU_MTYPE(U16), xgmi_link_speed);                           \
+	SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(NONE),               \
+		   SMU_MTYPE(U64), gfx_activity_acc);                          \
+	SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(NONE),               \
+		   SMU_MTYPE(U64), mem_activity_acc);                          \
+	SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(NONE),              \
+		  SMU_MTYPE(U64), pcie_bandwidth_acc, SMU_15_0_8_MAX_MID);     \
+	SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1),             \
+		  SMU_MTYPE(U32), pcie_bandwidth_inst, SMU_15_0_8_MAX_MID);    \
+	SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE),     \
+		   SMU_MTYPE(U64), pcie_l0_to_recov_count_acc);                \
+	SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE),          \
+		   SMU_MTYPE(U64), pcie_replay_count_acc);                     \
+	SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE),    \
+		   SMU_MTYPE(U64), pcie_replay_rover_count_acc);               \
+	SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE),        \
+		   SMU_MTYPE(U64), pcie_nak_sent_count_acc);                   \
+	SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE),        \
+		   SMU_MTYPE(U64), pcie_nak_rcvd_count_acc);                   \
+	SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE),                \
+		  SMU_MTYPE(U16), xgmi_link_status,                            \
+		  SMU_15_0_8_NUM_XGMI_LINKS);                                  \
+	SMU_SCALAR(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1),           \
+		   SMU_MTYPE(U64), xgmi_read_data_acc);                        \
+	SMU_SCALAR(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1),          \
+		   SMU_MTYPE(U64), xgmi_write_data_acc);                                  \
+	SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2),           \
+		   SMU_MTYPE(U64), firmware_timestamp);                        \
+	SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1),               \
+		  SMU_MTYPE(U16), current_gfxclk, SMU_15_0_8_MAX_GFX_CLKS);    \
+	SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1),               \
+		  SMU_MTYPE(U16), current_socclk, SMU_15_0_8_MAX_MID);         \
+	SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1),                \
+		  SMU_MTYPE(U16), current_vclk0, SMU_15_0_8_MAX_VCN);          \
+	SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1),                \
+		  SMU_MTYPE(U16), current_dclk0, SMU_15_0_8_MAX_VCN);          \
+	SMU_ARRAY(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1),                 \
+		  SMU_MTYPE(U16), current_uclk, SMU_15_0_8_MAX_AID);           \
+	SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY),                 \
+		   SMU_MUNIT(NONE), SMU_MTYPE(U64),                            \
+		   pcie_lc_perf_other_end_recovery);                           \
+	SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT),                \
+		  SMU_MTYPE(U32), gfx_busy_inst, SMU_15_0_8_MAX_XCC);          \
+	SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16),    \
+		  jpeg_busy, SMU_15_0_8_MAX_JPEG);                             \
+	SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16),     \
+		  vcn_busy, SMU_15_0_8_MAX_VCN);                               \
+	SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(NONE), SMU_MTYPE(U64),    \
+		  gfx_busy_acc, SMU_15_0_8_MAX_XCC);                           \
+	SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE),    \
+		  SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc,                \
+		  SMU_15_0_8_MAX_XCC);                                         \
+	SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE),    \
+		  SMU_MTYPE(U64), gfx_below_host_limit_thm_acc,                \
+		  SMU_15_0_8_MAX_XCC);                                         \
+	SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE),         \
+		  SMU_MTYPE(U64), gfx_low_utilization_acc,                     \
+		  SMU_15_0_8_MAX_XCC);                                         \
+	SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE),  \
+		  SMU_MTYPE(U64), gfx_below_host_limit_total_acc,              \
+		  SMU_15_0_8_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_gpu_metrics, SMU_15_0_8_METRICS_FIELDS);
+#endif
 #endif
-- 
2.53.0


  parent reply	other threads:[~2026-03-17 20:13 UTC|newest]

Thread overview: 25+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-17 20:12 [PATCH 01/25] drm/amd/pm: Add smu v15_0_8 driver interface header Alex Deucher
2026-03-17 20:12 ` [PATCH 02/25] drm/amd/pm: Add smu v15_0_8 message header Alex Deucher
2026-03-17 20:12 ` [PATCH 03/25] drm/amd/pm: Add smu v15_0_8 pmfw header Alex Deucher
2026-03-17 20:12 ` [PATCH 04/25] drm/amd/pm: Add initial support for smu v15_0_8 Alex Deucher
2026-03-17 20:12 ` [PATCH 05/25] drm/amd/pm: Add mode2 support for smu_v15_0_8 Alex Deucher
2026-03-17 20:12 ` [PATCH 06/25] drm/amd/pm: Add static metrics support Alex Deucher
2026-03-17 20:12 ` [PATCH 07/25] drm/amd/pm: Setup driver pptable for smu 15.0.8 Alex Deucher
2026-03-17 20:12 ` [PATCH 08/25] drm/amd/pm: Update dpm table structs for smu_v15_0 Alex Deucher
2026-03-17 20:12 ` [PATCH 09/25] drm/amd/pm: Add default dpm table support for smu 15.0.8 Alex Deucher
2026-03-17 20:12 ` [PATCH 10/25] drm/amd/pm: Add get_pm_metrics " Alex Deucher
2026-03-17 20:12 ` Alex Deucher [this message]
2026-03-17 20:12 ` [PATCH 12/25] drm/amd/pm: add get_unique_id " Alex Deucher
2026-03-17 20:12 ` [PATCH 13/25] drm/amd/pm: add set{get}_power_limit " Alex Deucher
2026-03-17 20:12 ` [PATCH 14/25] drm/amd/pm: Add emit clock support Alex Deucher
2026-03-17 20:12 ` [PATCH 15/25] drm/amd/pm: add populate_umd_state_clk support Alex Deucher
2026-03-17 20:12 ` [PATCH 16/25] drm/amd/pm: Add set_performance_support Alex Deucher
2026-03-17 20:12 ` [PATCH 17/25] drm/amd/pm: Add od_edit_dpm_table support Alex Deucher
2026-03-17 20:12 ` [PATCH 18/25] drm/amd/pm: Add get_thermal_temperature_range support Alex Deucher
2026-03-17 20:12 ` [PATCH 19/25] drm/amd/pm: Add ppt1 support Alex Deucher
2026-03-17 20:12 ` [PATCH 20/25] drm/amd/pm: Add read sensor support Alex Deucher
2026-03-17 20:12 ` [PATCH 21/25] drm/amd/pm: Add gpuboard temperature metrics support Alex Deucher
2026-03-17 20:12 ` [PATCH 22/25] drm/amd/pm: Add baseboard " Alex Deucher
2026-03-17 20:12 ` [PATCH 23/25] drm/amd/pm: Add NPM support for smu_v15_0_8 Alex Deucher
2026-03-17 20:12 ` [PATCH 24/25] drm/amdgpu: Add smu v15_0_8 ip block Alex Deucher
2026-03-17 20:12 ` [PATCH 25/25] drm/amd/pm: Enable user specified gfx clock ranges Alex Deucher

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260317201242.3808136-11-alexander.deucher@amd.com \
    --to=alexander.deucher@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=asad.kamal@amd.com \
    --cc=kevinyang.wang@amd.com \
    --cc=lijo.lazar@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox