From: <Roman.Li@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Daniel Wheeler <daniel.wheeler@amd.com>, <Harry.Wentland@amd.com>,
<Sunpeng.Li@amd.com>, <Rodrigo.Siqueira@amd.com>,
<Aurabindo.Pillai@amd.com>, <roman.li@amd.com>,
<wayne.lin@amd.com>, <solomon.chiu@amd.com>,
<agustin.gutierrez@amd.com>, <hamza.mahfooz@amd.com>,
Wenjing Liu <wenjing.liu@amd.com>,
Dillon Varone <dillon.varone@amd.com>
Subject: [PATCH 04/43] drm/amd/display: optimize dml2 pipe resource allocation order
Date: Thu, 28 Mar 2024 15:50:08 -0400 [thread overview]
Message-ID: <20240328195047.2843715-5-Roman.Li@amd.com> (raw)
In-Reply-To: <20240328195047.2843715-1-Roman.Li@amd.com>
From: Wenjing Liu <wenjing.liu@amd.com>
[why]
There could be cases where we are transitioning from MPC to ODM combine.
In this case, if we map pipes before unmapping MPC pipes, we might
temporarily run out of pipes. The change reorders pipe resource
allocation so that we unmap pipes before mapping new pipes.
Reviewed-by: Dillon Varone <dillon.varone@amd.com>
Acked-by: Roman Li <roman.li@amd.com>
Signed-off-by: Wenjing Liu <wenjing.liu@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
---
.../gpu/drm/amd/display/dc/core/dc_resource.c | 2 +
.../display/dc/dml2/dml2_dc_resource_mgmt.c | 126 ++++++++++++------
.../amd/display/dc/dml2/dml2_internal_types.h | 11 ++
.../drm/amd/display/dc/dml2/dml2_wrapper.h | 2 +
4 files changed, 97 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index dd0428024173..601af21b2df9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -5052,7 +5052,9 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
+ dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count;
dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
+ dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count;
dml2_options->callbacks.get_opp_head = &resource_get_opp_head;
dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream;
dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index b64e0160d482..27d9da8ad7c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -793,7 +793,7 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index);
}
-static unsigned int get_mpc_factor(struct dml2_context *ctx,
+static unsigned int get_target_mpc_factor(struct dml2_context *ctx,
struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
@@ -822,7 +822,7 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
return mpc_factor;
}
-static unsigned int get_odm_factor(
+static unsigned int get_target_odm_factor(
const struct dml2_context *ctx,
struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
@@ -849,79 +849,117 @@ static unsigned int get_odm_factor(
return 1;
}
+static unsigned int get_source_odm_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream);
+
+ return ctx->config.callbacks.get_odm_slice_count(otg_master);
+}
+
+static unsigned int get_source_mpc_factor(const struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_plane_state *plane)
+{
+ struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ int dpp_pipe_count = ctx->config.callbacks.get_dpp_pipes_for_plane(plane,
+ &state->res_ctx, dpp_pipes);
+
+ ASSERT(dpp_pipe_count > 0);
+ return ctx->config.callbacks.get_mpc_slice_count(dpp_pipes[0]);
+}
+
+
static void populate_mpc_factors_for_stream(
struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
struct dc_state *state,
unsigned int stream_idx,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
const struct dc_stream_status *status = &state->stream_status[stream_idx];
int i;
- for (i = 0; i < status->plane_count; i++)
- if (odm_factor == 1)
- mpc_factors[i] = get_mpc_factor(
- ctx, state, disp_cfg, mapping, status,
- state->streams[stream_idx], i);
- else
- mpc_factors[i] = 1;
+ for (i = 0; i < status->plane_count; i++) {
+ mpc_factors[i].source = get_source_mpc_factor(ctx, state, status->plane_states[i]);
+ mpc_factors[i].target = (odm_factor.target == 1) ?
+ get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1;
+ }
}
static void populate_odm_factors(const struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
struct dc_state *state,
- unsigned int odm_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES])
{
int i;
- for (i = 0; i < state->stream_count; i++)
- odm_factors[i] = get_odm_factor(
+ for (i = 0; i < state->stream_count; i++) {
+ odm_factors[i].source = get_source_odm_factor(ctx, state, state->streams[i]);
+ odm_factors[i].target = get_target_odm_factor(
ctx, state, disp_cfg, mapping, state->streams[i]);
+ }
}
-static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+static bool unmap_dc_pipes_for_stream(struct dml2_context *ctx,
struct dc_state *state,
const struct dc_state *existing_state,
const struct dc_stream_state *stream,
const struct dc_stream_status *status,
- unsigned int odm_factor,
- unsigned int mpc_factors[MAX_PIPES])
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
{
int plane_idx;
bool result = true;
- if (odm_factor == 1)
- /*
- * ODM and MPC combines are by DML design mutually exclusive.
- * ODM factor of 1 means MPC factors may be greater than 1.
- * In this case, we want to set ODM factor to 1 first to free up
- * pipe resources from previous ODM configuration before setting
- * up MPC combine to acquire more pipe resources.
- */
+ for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
+ if (mpc_factors[plane_idx].target < mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target < odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
+ return result;
+}
+
+static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
+ struct dc_state *state,
+ const struct dc_state *existing_state,
+ const struct dc_stream_state *stream,
+ const struct dc_stream_status *status,
+ struct dml2_pipe_combine_factor odm_factor,
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES])
+{
+ int plane_idx;
+ bool result = true;
+
for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
- result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
- state,
- existing_state,
- ctx->config.callbacks.dc->res_pool,
- status->plane_states[plane_idx],
- mpc_factors[plane_idx]);
- if (odm_factor > 1)
+ if (mpc_factors[plane_idx].target > mpc_factors[plane_idx].source)
+ result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
+ state,
+ existing_state,
+ ctx->config.callbacks.dc->res_pool,
+ status->plane_states[plane_idx],
+ mpc_factors[plane_idx].target);
+ if (odm_factor.target > odm_factor.source)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
- odm_factor);
+ odm_factor.target);
return result;
}
@@ -931,20 +969,20 @@ static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_state *existing_state)
{
- unsigned int odm_factors[MAX_PIPES];
- unsigned int mpc_factors_for_stream[MAX_PIPES];
int i;
bool result = true;
- populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors);
- for (i = 0; i < state->stream_count; i++) {
+ populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors);
+ for (i = 0; i < state->stream_count; i++)
populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state,
- i, odm_factors[i], mpc_factors_for_stream);
- result &= map_dc_pipes_for_stream(ctx, state, existing_state,
- state->streams[i],
- &state->stream_status[i],
- odm_factors[i], mpc_factors_for_stream);
- }
+ i, ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= unmap_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+ for (i = 0; i < state->stream_count; i++)
+ result &= map_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i],
+ &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]);
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
index 1cf8a884c0fb..9dab4e43c511 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
@@ -109,10 +109,21 @@ enum dml2_architecture {
dml2_architecture_20,
};
+struct dml2_pipe_combine_factor {
+ unsigned int source;
+ unsigned int target;
+};
+
+struct dml2_pipe_combine_scratch {
+ struct dml2_pipe_combine_factor odm_factors[MAX_PIPES];
+ struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES][MAX_PIPES];
+};
+
struct dml2_context {
enum dml2_architecture architecture;
struct dml2_configuration_options config;
struct dml2_helper_det_policy_scratch det_helper_scratch;
+ struct dml2_pipe_combine_scratch pipe_combine_scratch;
union {
struct {
struct display_mode_lib_st dml_core_ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 6e97337863e0..54aff9beb73a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -86,7 +86,9 @@ struct dml2_dc_callbacks {
const struct dc_plane_state *plane,
int slice_count);
int (*get_odm_slice_index)(const struct pipe_ctx *opp_head);
+ int (*get_odm_slice_count)(const struct pipe_ctx *opp_head);
int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe);
+ int (*get_mpc_slice_count)(const struct pipe_ctx *dpp_pipe);
struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx);
struct pipe_ctx *(*get_otg_master_for_stream)(
struct resource_context *res_ctx,
--
2.34.1
next prev parent reply other threads:[~2024-03-28 19:51 UTC|newest]
Thread overview: 52+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-03-28 19:50 [PATCH 00/43] DC Patches Apr 1, 2024 Roman.Li
2024-03-28 19:50 ` [PATCH 01/43] drm/amd/display: Fix compiler redefinition warnings for certain configs Roman.Li
2024-04-01 13:07 ` Wheeler, Daniel
2024-03-28 19:50 ` [PATCH 02/43] drm/amd/display: Add timing pixel encoding for mst mode validation Roman.Li
2024-03-28 19:50 ` [PATCH 03/43] drm/amd/display: fix underflow in some two display subvp/non-subvp configs Roman.Li
2024-03-28 19:50 ` Roman.Li [this message]
2024-03-28 19:50 ` [PATCH 05/43] drm/amd/display: Toggle additional RCO options in DCN35 Roman.Li
2024-03-28 19:50 ` [PATCH 06/43] drm/amd/display: Decouple dcn35 and dcn351 dmub firmware Roman.Li
2024-03-28 19:50 ` [PATCH 07/43] drm/amd/display: Expand supported Replay residency mode Roman.Li
2024-03-28 19:50 ` [PATCH 08/43] drm/amd/display: FEC overhead should be checked once for mst slot nums Roman.Li
2024-07-18 7:09 ` Jiri Slaby
2024-07-30 6:00 ` Lin, Wayne
2024-07-31 6:47 ` Jiri Slaby
2024-03-28 19:50 ` [PATCH 09/43] drm/amd/display: handle invalid connector indices Roman.Li
2024-03-28 19:50 ` [PATCH 10/43] drm/amd/display: Add dmub additional interface support for FAMS Roman.Li
2024-03-28 19:50 ` [PATCH 11/43] drm/amd/display: update pipe topology log to support subvp Roman.Li
2024-03-28 19:50 ` [PATCH 12/43] drm/amd/display: Enable DTBCLK DTO earlier in the sequence Roman.Li
2024-03-28 19:50 ` [PATCH 13/43] drm/amd/display: Add dummy interface for tracing DCN32 SMU messages Roman.Li
2024-03-28 19:50 ` [PATCH 14/43] drm/amd/display: Enable RCO for HDMISTREAMCLK in DCN35 Roman.Li
2024-03-28 19:50 ` [PATCH 15/43] drm/amd/display: Allow HPO PG for DCN35 Roman.Li
2024-03-28 19:50 ` [PATCH 16/43] drm/amd/display: Skip on writeback when it's not applicable Roman.Li
2024-03-28 19:50 ` [PATCH 17/43] drm/amd/display: Add OTG check for set AV mute Roman.Li
2024-03-28 19:50 ` [PATCH 18/43] drm/amd/display: Add extra logging for HUBP and OTG Roman.Li
2024-03-28 19:50 ` [PATCH 19/43] drm/amd/display: Disable Z8 minimum stutter period check for DCN35 Roman.Li
2024-03-28 19:50 ` [PATCH 20/43] drm/amd/display: add root clock control function pointer to fix display corruption Roman.Li
2024-03-28 19:50 ` [PATCH 21/43] drm/amd/display: Add extra DMUB logging to track message timeout Roman.Li
2024-03-28 19:50 ` [PATCH 22/43] drm/amd/display: remove context->dml2 dependency from DML21 wrapper Roman.Li
2024-03-28 19:50 ` [PATCH 23/43] drm/amd/display: Add handling for DC power mode Roman.Li
2024-03-28 19:50 ` [PATCH 24/43] drm/amd/display: move build test pattern params as part of pipe resource update for odm Roman.Li
2024-03-28 19:50 ` [PATCH 25/43] drm/amd/display: Fix compiler warnings on high compiler warning levels Roman.Li
2024-03-28 19:50 ` [PATCH 26/43] drm/amd/display: Allow RCG for Static Screen + LVP for DCN35 Roman.Li
2024-03-28 19:50 ` [PATCH 27/43] drm/amd/display: 3.2.279 Roman.Li
2024-03-28 19:50 ` [PATCH 28/43] drm/amd/display: Initialize DP ref clk with the correct clock Roman.Li
2024-03-28 19:50 ` [PATCH 29/43] drm/amd/display: Set alpha enable to 0 for some specific formats Roman.Li
2024-03-28 19:50 ` [PATCH 30/43] drm/amd/display: Enable cur_rom_en even if cursor degamma is not enabled Roman.Li
2024-04-01 13:40 ` Melissa Wen
2024-04-01 13:52 ` Harry Wentland
2024-03-28 19:50 ` [PATCH 31/43] drm/amd/display: Add some missing debug registers Roman.Li
2024-03-28 19:50 ` [PATCH 32/43] drm/amd/display: Update DSC compute parameter calculation Roman.Li
2024-03-28 19:50 ` [PATCH 33/43] drm/amd/display: Drop legacy code Roman.Li
2024-03-28 19:50 ` [PATCH 34/43] drm/amd/display: Add missing registers Roman.Li
2024-03-28 19:50 ` [PATCH 35/43] drm/amd/display: Remove redundant RESERVE0 and RESERVE1 Roman.Li
2024-03-28 19:50 ` [PATCH 36/43] drm/amd/display: Add missing SFB and OPP_SF Roman.Li
2024-03-28 19:50 ` [PATCH 37/43] drm/amd/display: Initialize debug variable data Roman.Li
2024-03-28 19:50 ` [PATCH 38/43] drm/amd/display: Fix MPCC DTN logging Roman.Li
2024-04-01 13:30 ` Melissa Wen
2024-03-28 19:50 ` [PATCH 39/43] drm/amd/display: Add WBSCL ram coefficient for writeback Roman.Li
2024-03-28 19:50 ` [PATCH 40/43] drm/amd/display: Add code comments clock and encode code Roman.Li
2024-03-28 19:50 ` [PATCH 41/43] drm/amd/display: Includes adjustments Roman.Li
2024-03-28 19:50 ` [PATCH 42/43] drm/amd/display: Add color logs for dcn20 Roman.Li
2024-03-28 19:50 ` [PATCH 43/43] drm/amd/display: Enable FGCG for DCN351 Roman.Li
2024-04-01 13:21 ` [PATCH 00/43] DC Patches Apr 1, 2024 Wheeler, Daniel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240328195047.2843715-5-Roman.Li@amd.com \
--to=roman.li@amd.com \
--cc=Aurabindo.Pillai@amd.com \
--cc=Harry.Wentland@amd.com \
--cc=Rodrigo.Siqueira@amd.com \
--cc=Sunpeng.Li@amd.com \
--cc=agustin.gutierrez@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=daniel.wheeler@amd.com \
--cc=dillon.varone@amd.com \
--cc=hamza.mahfooz@amd.com \
--cc=solomon.chiu@amd.com \
--cc=wayne.lin@amd.com \
--cc=wenjing.liu@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox