From: Chenyu Chen <chen-yu.chen@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Harry Wentland <harry.wentland@amd.com>,
Leo Li <sunpeng.li@amd.com>,
Aurabindo Pillai <aurabindo.pillai@amd.com>,
Roman Li <roman.li@amd.com>, Wayne Lin <wayne.lin@amd.com>,
Tom Chung <chiahsuan.chung@amd.com>,
"Fangzhi Zuo" <jerry.zuo@amd.com>,
Dan Wheeler <daniel.wheeler@amd.com>, Ray Wu <Ray.Wu@amd.com>,
Ivan Lipski <ivan.lipski@amd.com>, Alex Hung <alex.hung@amd.com>,
Joshua Aberback <joshua.aberback@amd.com>,
Aric Cyr <aric.cyr@amd.com>, Chenyu Chen <chen-yu.chen@amd.com>
Subject: [PATCH 1/6] drm/amd/display: Defer transitions from minimal state to final state
Date: Tue, 2 Dec 2025 18:21:03 +0800 [thread overview]
Message-ID: <20251202102437.3126523-2-chen-yu.chen@amd.com> (raw)
In-Reply-To: <20251202102437.3126523-1-chen-yu.chen@amd.com>
From: Joshua Aberback <joshua.aberback@amd.com>
[Why]
In non-seamless pipe transitions, it can take several frames to process
a single flip. One reason is the 2-step transition implementation, in
which the minimal transition state is applied first and the final state
is applied afterwards, all within the same flip. This delay is noticeable
to the user in some video playback scenarios, which makes for a bad user
experience.
[How]
- in applicable non-seamless cases, complete the flip with the minimal
state applied, start a counter, and create all new contexts as minimal
- if another pipe transition occurs while counting, reset the counter
- when the counter finishes, promote the current flip to a full update
and restore creation of optimized contexts
- when creating minimal states from new context, apply stream updates
Reviewed-by: Aric Cyr <aric.cyr@amd.com>
Signed-off-by: Joshua Aberback <joshua.aberback@amd.com>
Signed-off-by: Chenyu Chen <chen-yu.chen@amd.com>
---
drivers/gpu/drm/amd/display/dc/core/dc.c | 200 +++++++++++++++--------
drivers/gpu/drm/amd/display/dc/dc.h | 7 +
2 files changed, 137 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1e7c61b975e3..e0db791953a5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2963,6 +2963,11 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
{
struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
+ /* When countdown finishes, promote this flip to full to trigger deferred final transition */
+ if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+
if (stream_update && stream_update->pending_test_pattern) {
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
@@ -3441,6 +3446,49 @@ static bool full_update_required_weak(
const struct dc_stream_update *stream_update,
const struct dc_stream_state *stream);
+struct pipe_split_policy_backup {
+ bool dynamic_odm_policy;
+ bool subvp_policy;
+ enum pipe_split_policy mpc_policy;
+ char force_odm[MAX_PIPES];
+};
+
+static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ int i;
+
+ if (!dc->config.is_vmin_only_asic) {
+ policy->mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+ policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
+ policy->subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+ for (i = 0; i < context->stream_count; i++) {
+ policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
+ if (context->streams[i]->debug.allow_transition_for_forced_odm)
+ context->streams[i]->debug.force_odm_combine_segments = 0;
+ }
+}
+
+static void restore_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ uint8_t i;
+
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy =
+ policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
+}
+
/**
* update_planes_and_stream_state() - The function takes planes and stream
* updates as inputs and determines the appropriate update type. If update type
@@ -3591,10 +3639,30 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
+ struct pipe_split_policy_backup policy;
+ bool minimize = false;
+
+ if (dc->check_config.deferred_transition_state) {
+ if (dc->check_config.transition_countdown_to_steady_state) {
+ /* During countdown, all new contexts created as minimal transition states */
+ minimize = true;
+ } else {
+ dc->check_config.deferred_transition_state = false;
+ }
+ }
+
+ if (minimize)
+ backup_and_set_minimal_pipe_split_policy(dc, context, &policy);
+
if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
+ if (minimize)
+ restore_minimal_pipe_split_policy(dc, context, &policy);
BREAK_TO_DEBUGGER();
goto fail;
}
+
+ if (minimize)
+ restore_minimal_pipe_split_policy(dc, context, &policy);
}
update_seamless_boot_flags(dc, context, surface_count, stream);
@@ -4622,48 +4690,6 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
return force_minimal_pipe_splitting;
}
-struct pipe_split_policy_backup {
- bool dynamic_odm_policy;
- bool subvp_policy;
- enum pipe_split_policy mpc_policy;
- char force_odm[MAX_PIPES];
-};
-
-static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
- struct dc_state *context,
- struct pipe_split_policy_backup *policy)
-{
- int i;
-
- if (!dc->config.is_vmin_only_asic) {
- policy->mpc_policy = dc->debug.pipe_split_policy;
- dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
- }
- policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
- dc->debug.enable_single_display_2to1_odm_policy = false;
- policy->subvp_policy = dc->debug.force_disable_subvp;
- dc->debug.force_disable_subvp = true;
- for (i = 0; i < context->stream_count; i++) {
- policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
- if (context->streams[i]->debug.allow_transition_for_forced_odm)
- context->streams[i]->debug.force_odm_combine_segments = 0;
- }
-}
-
-static void restore_minimal_pipe_split_policy(struct dc *dc,
- struct dc_state *context,
- struct pipe_split_policy_backup *policy)
-{
- uint8_t i;
-
- if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = policy->mpc_policy;
- dc->debug.enable_single_display_2to1_odm_policy =
- policy->dynamic_odm_policy;
- dc->debug.force_disable_subvp = policy->subvp_policy;
- for (i = 0; i < context->stream_count; i++)
- context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
-}
static void release_minimal_transition_state(struct dc *dc,
struct dc_state *minimal_transition_context,
@@ -4773,6 +4799,7 @@ static int initialize_empty_surface_updates(
static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
struct dc_state *new_context,
struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
struct dc_surface_update *srf_updates,
int surface_count)
{
@@ -4790,7 +4817,7 @@ static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
new_context)) {
DC_LOG_DC("commit minimal transition state: base = new state\n");
commit_planes_for_stream(dc, srf_updates,
- surface_count, stream, NULL,
+ surface_count, stream, stream_update,
UPDATE_TYPE_FULL, intermediate_context);
swap_and_release_current_context(
dc, intermediate_context, stream);
@@ -4884,8 +4911,8 @@ static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
int surface_count)
{
bool success = commit_minimal_transition_based_on_new_context(
- dc, new_context, stream, srf_updates,
- surface_count);
+ dc, new_context, stream, NULL,
+ srf_updates, surface_count);
if (!success)
success = commit_minimal_transition_based_on_current_context(dc,
new_context, stream);
@@ -5294,32 +5321,63 @@ static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
enum surface_update_type update_type,
struct dc_state *new_context)
{
+ bool skip_new_context = false;
ASSERT(update_type >= UPDATE_TYPE_FULL);
- if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
- dc->current_state, new_context))
- /*
- * It is required by the feature design that all pipe topologies
- * using extra free pipes for power saving purposes such as
- * dynamic ODM or SubVp shall only be enabled when it can be
- * transitioned seamlessly to AND from its minimal transition
- * state. A minimal transition state is defined as the same dc
- * state but with all power saving features disabled. So it uses
- * the minimum pipe topology. When we can't seamlessly
- * transition from state A to state B, we will insert the
- * minimal transition state A' or B' in between so seamless
- * transition between A and B can be made possible.
- */
- commit_minimal_transition_state_in_dc_update(dc, new_context,
- stream, srf_updates, surface_count);
+ /*
+ * It is required by the feature design that all pipe topologies
+ * using extra free pipes for power saving purposes such as
+ * dynamic ODM or SubVp shall only be enabled when it can be
+ * transitioned seamlessly to AND from its minimal transition
+ * state. A minimal transition state is defined as the same dc
+ * state but with all power saving features disabled. So it uses
+ * the minimum pipe topology. When we can't seamlessly
+ * transition from state A to state B, we will insert the
+ * minimal transition state A' or B' in between so seamless
+ * transition between A and B can be made possible.
+ *
+ * To optimize for the time it takes to execute flips,
+ * the transition from the minimal state to the final state is
+ * deferred until a steady state (no more transitions) is reached.
+ */
+ if (!dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, new_context)) {
+ if (!dc->debug.disable_deferred_minimal_transitions) {
+ dc->check_config.deferred_transition_state = true;
+ dc->check_config.transition_countdown_to_steady_state =
+ dc->debug.num_fast_flips_to_steady_state_override ?
+ dc->debug.num_fast_flips_to_steady_state_override :
+ NUM_FAST_FLIPS_TO_STEADY_STATE;
+
+ if (commit_minimal_transition_based_on_new_context(dc, new_context, stream, stream_update,
+ srf_updates, surface_count)) {
+ skip_new_context = true;
+ dc_state_release(new_context);
+ new_context = dc->current_state;
+ } else {
+ /*
+ * In this case a new mpo plane is being enabled on pipes that were
+ * previously in use, and the surface update to the existing plane
+ * includes an alpha box where the new plane will be, so the update
+ * from minimal to final cannot be deferred as the alpha box would
+ * be visible to the user
+ */
+ commit_minimal_transition_based_on_current_context(dc, new_context, stream);
+ }
+ } else {
+ commit_minimal_transition_state_in_dc_update(dc, new_context, stream,
+ srf_updates, surface_count);
+ }
+ } else if (dc->check_config.deferred_transition_state) {
+ /* reset countdown as steady state not reached */
+ dc->check_config.transition_countdown_to_steady_state =
+ dc->debug.num_fast_flips_to_steady_state_override ?
+ dc->debug.num_fast_flips_to_steady_state_override :
+ NUM_FAST_FLIPS_TO_STEADY_STATE;
+ }
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- new_context);
+ if (!skip_new_context) {
+ commit_planes_for_stream(dc, srf_updates, surface_count, stream, stream_update, update_type, new_context);
+ swap_and_release_current_context(dc, new_context, stream);
+ }
}
static bool update_planes_and_stream_v3(struct dc *dc,
@@ -5349,11 +5407,13 @@ static bool update_planes_and_stream_v3(struct dc *dc,
commit_planes_and_stream_update_on_current_context(dc,
srf_updates, surface_count, stream,
stream_update, update_type);
+
+ if (dc->check_config.transition_countdown_to_steady_state)
+ dc->check_config.transition_countdown_to_steady_state--;
} else {
commit_planes_and_stream_update_with_new_context(dc,
srf_updates, surface_count, stream,
stream_update, update_type, new_context);
- swap_and_release_current_context(dc, new_context, stream);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 167cfb1b01dd..d926bf54185b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -82,6 +82,8 @@ struct dcn_dccg_reg_state;
#define MAX_DPIA_PER_HOST_ROUTER 3
#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER)
+#define NUM_FAST_FLIPS_TO_STEADY_STATE 20
+
/* Display Core Interfaces */
struct dc_versions {
const char *dc_ver;
@@ -293,6 +295,9 @@ struct dc_check_config {
*/
unsigned int max_optimizable_video_width;
bool enable_legacy_fast_update;
+
+ bool deferred_transition_state;
+ unsigned int transition_countdown_to_steady_state;
};
struct dc_caps {
@@ -1201,6 +1206,8 @@ struct dc_debug_options {
bool disable_stutter_for_wm_program;
bool enable_block_sequence_programming;
uint32_t custom_psp_footer_size;
+ bool disable_deferred_minimal_transitions;
+ unsigned int num_fast_flips_to_steady_state_override;
};
--
2.43.0
next prev parent reply other threads:[~2025-12-02 10:28 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-02 10:21 [PATCH 0/6] DC Patches Dec 08 2025 Chenyu Chen
2025-12-02 10:21 ` Chenyu Chen [this message]
2025-12-02 10:21 ` [PATCH 2/6] drm/amd/display: Remove periodic detection callbacks from dcn35+ Chenyu Chen
2025-12-02 10:21 ` [PATCH 3/6] drm/amd/display: Fixes for S0i3 exit Chenyu Chen
2025-12-02 10:21 ` [PATCH 4/6] drm/amd/display: Refactor dml_core_mode_support to reduce stack frame Chenyu Chen
2025-12-02 16:29 ` Alex Hung
2025-12-02 16:31 ` Alex Hung
2025-12-03 9:42 ` Chen, Chen-Yu
2025-12-02 10:21 ` [PATCH 5/6] drm/amd/display: Additional info from DML for DMU Chenyu Chen
2025-12-02 10:21 ` [PATCH 6/6] drm/amd/display: Promote DC to 3.2.362 Chenyu Chen
2025-12-08 13:58 ` [PATCH 0/6] DC Patches Dec 08 2025 Wheeler, Daniel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251202102437.3126523-2-chen-yu.chen@amd.com \
--to=chen-yu.chen@amd.com \
--cc=Ray.Wu@amd.com \
--cc=alex.hung@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=aric.cyr@amd.com \
--cc=aurabindo.pillai@amd.com \
--cc=chiahsuan.chung@amd.com \
--cc=daniel.wheeler@amd.com \
--cc=harry.wentland@amd.com \
--cc=ivan.lipski@amd.com \
--cc=jerry.zuo@amd.com \
--cc=joshua.aberback@amd.com \
--cc=roman.li@amd.com \
--cc=sunpeng.li@amd.com \
--cc=wayne.lin@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox