AMD-GFX Archive on lore.kernel.org
From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
To: amd-gfx@lists.freedesktop.org
Cc: Eryk.Brol@amd.com, Sunpeng.Li@amd.com, Harry.Wentland@amd.com,
	qingqing.zhuo@amd.com, Rodrigo.Siqueira@amd.com,
	roman.li@amd.com, Aurabindo.Pillai@amd.com,
	Tony Cheng <Tony.Cheng@amd.com>,
	Yongqiang Sun <yongqiang.sun@amd.com>,
	Bhawanpreet.Lakha@amd.com, bindu.r@amd.com
Subject: [PATCH 18/18] drm/amd/display: init soc bounding box for dcn3.01.
Date: Fri, 20 Nov 2020 15:19:58 -0500	[thread overview]
Message-ID: <20201120201958.2455002-19-Rodrigo.Siqueira@amd.com> (raw)
In-Reply-To: <20201120201958.2455002-1-Rodrigo.Siqueira@amd.com>

From: Yongqiang Sun <yongqiang.sun@amd.com>

[Why & How]
Update the SoC bounding box initialization and the bandwidth (bw) bounding box
for DCN3.01, and remove the pp_smu interface, which is not used.
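
For context on the first half of the change: init_soc_bounding_box() now asks
the video BIOS for SoC bounding-box information and only overwrites a built-in
DCN3.01 default when the BIOS reports a non-zero value for that field. Below is
a minimal standalone sketch of that "override defaults only when reported"
guard, using hypothetical structures and sample numbers rather than the real
dc_bios interface:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; not the real dc_bios / dcn3_01_soc structures. */
struct soc_params {
    double dram_clock_change_latency_us;
    double sr_exit_time_us;
};

struct bios_bb_info {
    unsigned int dram_clock_change_latency_100ns;
    unsigned int dram_sr_exit_latency_100ns;
};

/* Pretend BIOS query: a zero field means "not reported". */
static bool query_bios_bb_info(struct bios_bb_info *info)
{
    info->dram_clock_change_latency_100ns = 350; /* sample value */
    info->dram_sr_exit_latency_100ns = 0;        /* not reported */
    return true;
}

int main(void)
{
    struct soc_params soc = { 23.84, 12.36 };    /* built-in defaults */
    struct bios_bb_info info = {0};

    /* Override a default only when the BIOS reports a non-zero value,
     * scaling the raw field the same way the patch does (* 10). */
    if (query_bios_bb_info(&info)) {
        if (info.dram_clock_change_latency_100ns > 0)
            soc.dram_clock_change_latency_us =
                info.dram_clock_change_latency_100ns * 10;
        if (info.dram_sr_exit_latency_100ns > 0)
            soc.sr_exit_time_us = info.dram_sr_exit_latency_100ns * 10;
    }

    printf("dram_clock_change_latency_us = %.2f\n",
           soc.dram_clock_change_latency_us);
    printf("sr_exit_time_us = %.2f\n", soc.sr_exit_time_us);
    return 0;
}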

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 .../amd/display/dc/dcn301/dcn301_resource.c   | 118 +++++++++---------
 1 file changed, 60 insertions(+), 58 deletions(-)
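
For the bandwidth bounding box, the new dcn301_update_bw_bounding_box() in the
diff below walks the firmware-provided clock table and, for each entry, scans
the predefined DCN3.01 SoC states backwards to find the highest state whose
DCFCLK does not exceed the entry's DCFCLK; the derived clocks (dispclk, dppclk,
dscclk, ...) are then copied from that state. A minimal standalone sketch of
that matching step, with hypothetical sample values rather than real DCN3.01
limits, might look like:

#include <stdio.h>

struct soc_state {
    unsigned int dcfclk_mhz;
    unsigned int dispclk_mhz;
};

/* Scan backwards and return the highest state whose DCFCLK does not
 * exceed the requested DCFCLK; fall back to state 0 if none fits. */
static unsigned int closest_state(const struct soc_state *states,
                                  int num_states, unsigned int dcfclk_mhz)
{
    int j;

    for (j = num_states - 1; j >= 0; j--) {
        if (states[j].dcfclk_mhz <= dcfclk_mhz)
            return (unsigned int)j;
    }
    return 0;
}

int main(void)
{
    /* Hypothetical predefined SoC states, lowest to highest DCFCLK. */
    const struct soc_state states[] = {
        { 300,  600 }, { 600, 1100 }, { 800, 1300 },
    };
    /* Hypothetical DCFCLK values from a firmware clock table. */
    const unsigned int entries[] = { 400, 650, 900 };
    unsigned int i;

    for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
        unsigned int lvl = closest_state(states, 3, entries[i]);

        printf("entry %u MHz -> state %u (dispclk %u MHz)\n",
               entries[i], lvl, states[lvl].dispclk_mhz);
    }
    return 0;
}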

diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4b029631a22c..124ae5253d4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1345,9 +1345,6 @@ static void dcn301_destruct(struct dcn301_resource_pool *pool)
 
 	if (pool->base.dccg != NULL)
 		dcn_dccg_destroy(&pool->base.dccg);
-
-	if (pool->base.pp_smu != NULL)
-		dcn301_pp_smu_destroy(&pool->base.pp_smu);
 }
 
 struct hubp *dcn301_hubp_create(
@@ -1600,41 +1597,25 @@ static bool init_soc_bounding_box(struct dc *dc,
 		}
 	}
 
-	if (pool->base.pp_smu) {
-		struct pp_smu_nv_clock_table max_clocks = {0};
-		unsigned int uclk_states[8] = {0};
-		unsigned int num_states = 0;
-		enum pp_smu_status status;
-		bool clock_limits_available = false;
-		bool uclk_states_available = false;
+	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
+	loaded_ip->max_num_dpp = pool->base.pipe_count;
+	dcn20_patch_bounding_box(dc, loaded_bb);
 
-		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
-			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
-				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
+	if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+		struct bp_soc_bb_info bb_info = {0};
 
-			uclk_states_available = (status == PP_SMU_RESULT_OK);
-		}
+		if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+			if (bb_info.dram_clock_change_latency_100ns > 0)
+				dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
 
-		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
-			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
-					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
-			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock
-			 */
-			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
-				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
-			clock_limits_available = (status == PP_SMU_RESULT_OK);
-		}
+			if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+				dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
 
-		if (clock_limits_available && uclk_states_available && num_states)
-			dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
-		else if (clock_limits_available)
-			dcn20_cap_soc_clocks(loaded_bb, max_clocks);
+			if (bb_info.dram_sr_exit_latency_100ns > 0)
+				dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+		}
 	}
 
-	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
-	loaded_ip->max_num_dpp = pool->base.pipe_count;
-	dcn20_patch_bounding_box(dc, loaded_bb);
-
 	return true;
 }
 
@@ -1682,36 +1663,58 @@ static void set_wm_ranges(
 	pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
 }
 
-static struct pp_smu_funcs *dcn301_pp_smu_create(struct dc_context *ctx)
+static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
-	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
-
-	if (!pp_smu)
-		return pp_smu;
-
-	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && !IS_DIAG_DC(ctx->dce_environment)) {
-		dm_pp_get_funcs(ctx, pp_smu);
-
-		/* TODO: update once we have n21 smu*/
-		if (pp_smu->ctx.ver != PP_SMU_VER_NV)
-			pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
-	}
-
-	return pp_smu;
-}
+	struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
+	struct clk_limit_table *clk_table = &bw_params->clk_table;
+	struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+	unsigned int i, closest_clk_lvl;
+	int j;
+
+	// Default clock levels are used for diags, which may lead to overclocking.
+	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+		dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+		dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
+		dcn3_01_soc.num_chans = bw_params->num_channels;
+
+		ASSERT(clk_table->num_entries);
+		for (i = 0; i < clk_table->num_entries; i++) {
+			/* loop backwards*/
+			for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
+				if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+					closest_clk_lvl = j;
+					break;
+				}
+			}
 
-static void dcn301_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
-{
-	if (pp_smu && *pp_smu) {
-		kfree(*pp_smu);
-		*pp_smu = NULL;
+			clock_limits[i].state = i;
+			clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+			clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+			clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+			clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+			clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+			clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+			clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+			clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+			clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+			clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+			clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+		}
+		for (i = 0; i < clk_table->num_entries; i++)
+			dcn3_01_soc.clock_limits[i] = clock_limits[i];
+		if (clk_table->num_entries) {
+			dcn3_01_soc.num_states = clk_table->num_entries;
+			/* duplicate last level */
+			dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+			dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+		}
 	}
-}
 
-static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
 	dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
 	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+	dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
 static struct resource_funcs dcn301_res_pool_funcs = {
@@ -1862,9 +1865,8 @@ static bool dcn301_resource_construct(
 		goto create_fail;
 	}
 
-	/* PP Lib and SMU interfaces */
-	pool->base.pp_smu = dcn301_pp_smu_create(ctx);
 	init_soc_bounding_box(dc, pool);
+
 	if (!dc->debug.disable_pplib_wm_range && pool->base.pp_smu->nv_funcs.set_wm_ranges)
 		set_wm_ranges(pool->base.pp_smu, &dcn3_01_soc);
 
-- 
2.29.2

Thread overview: 19+ messages
2020-11-20 20:19 [PATCH 00/18] DC 3.2.113 Patches, November 20, 2020 Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 01/18] drm/amd/display: add i2c speed arbitration for dc_i2c and hdcp_i2c Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 02/18] drm/amd/display: Source minimum HBlank support Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 03/18] drm/amd/display: Update panel register Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 04/18] drm/amd/display: Enable stutter for dcn3.01 Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 05/18] drm/amd/display: Add DMCU memory low power support Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 06/18] drm/amd/display: intermittent underflow observed when PIP is toggled in Full screen Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 07/18] drm/amd/display: expose clk_mgr functions for reuse Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 08/18] drm/amd/display: change hw sequence Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 09/18] drm/amd/display: Clear sticky vsc sdp error bit Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 10/18] drm/amd/display: Add BLNDGAM memory shutdown support Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 11/18] drm/amd/display: Add internal display info Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 12/18] drm/amd/display: Check multiple internal displays for power optimization Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 13/18] drm/amd/display: remove macro which is in header already Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 14/18] drm/amd/display: Add GAMCOR memory shutdown support Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 15/18] drm/amd/display: enable pipe power gating by default Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 16/18] drm/amd/display: 3.2.113 Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 17/18] drm/amd/display: To update backlight restore mechanism Rodrigo Siqueira
2020-11-20 20:19 ` [PATCH 18/18] drm/amd/display: init soc bounding box for dcn3.01. Rodrigo Siqueira [this message]
