From: Ville Syrjala <ville.syrjala@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Cc: intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Subject: [PATCH 5/9] drm/i915/dp: Implement the POST_LT_ADJ_REQ sequence
Date: Mon, 24 Feb 2025 19:26:41 +0200 [thread overview]
Message-ID: <20250224172645.15763-6-ville.syrjala@linux.intel.com> (raw)
In-Reply-To: <20250224172645.15763-1-ville.syrjala@linux.intel.com>
From: Ville Syrjälä <ville.syrjala@linux.intel.com>
Implement the POST_LT_ADJ_REQ sequence, which should be used
to further fine tune the link if TPS4 is not supported.
The POST_LT_ADJ_REQ sequence will be performed after
the normal link training has succeeded.
Only the final hop between the last LTTPR and DPRX will
perform the POST_LT_ADJ_REQ adjustment. The earlier hops
will use TPS4 instead since it's mandatory for LTTPRs.
The sequence will terminate when the sink clears the
"in progress" flag, the vswing/pre-emphasis values have
changed six times, or the vswing/pre-emphasis values have
remained unchanged for 200 ms.
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
.../drm/i915/display/intel_dp_link_training.c | 128 +++++++++++++++++-
.../drm/i915/display/intel_dp_link_training.h | 2 +-
drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +-
3 files changed, 128 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2506996bf16d..8863fc2c44ff 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -738,11 +738,14 @@ static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
int link_bw, int rate_select, int lane_count,
- bool enhanced_framing)
+ bool enhanced_framing, bool post_lt_adj_req)
{
if (enhanced_framing)
lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ if (post_lt_adj_req)
+ lane_count |= DP_POST_LT_ADJ_REQ_GRANTED;
+
if (link_bw) {
/* DP and eDP v1.3 and earlier link bw set method. */
u8 link_config[] = { link_bw, lane_count };
@@ -764,12 +767,25 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
}
}
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
+
+static bool intel_dp_use_post_lt_adj_req(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return intel_dp->set_idle_link_train &&
+ drm_dp_post_lt_adj_req_supported(intel_dp->dpcd) &&
+ intel_dp_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX) != DP_TRAINING_PATTERN_4;
+}
+
static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
{
intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, crtc_state->lane_count,
- crtc_state->enhanced_framing);
+ crtc_state->enhanced_framing,
+ intel_dp_use_post_lt_adj_req(intel_dp, crtc_state));
}
/*
@@ -1087,6 +1103,109 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
return channel_eq;
}
+static bool
+intel_dp_post_lt_adj_req(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ unsigned long deadline;
+ bool timeout = false;
+ bool success = false;
+ int changes = 0;
+
+ if (!intel_dp_use_post_lt_adj_req(intel_dp, crtc_state))
+ return true;
+
+ if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
+ link_status) < 0) {
+ lt_err(intel_dp, DP_PHY_DPRX, "Failed to get link status\n");
+ return false;
+ }
+
+ deadline = jiffies + msecs_to_jiffies_timeout(200);
+
+ for (;;) {
+ /* Make sure clock is still ok */
+ if (!drm_dp_clock_recovery_ok(link_status,
+ crtc_state->lane_count)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ lt_dbg(intel_dp, DP_PHY_DPRX,
+ "Clock recovery check failed, cannot continue POST_LT_ADJ_REQ\n");
+ break;
+ }
+
+ if (!drm_dp_channel_eq_ok(link_status,
+ crtc_state->lane_count)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ lt_dbg(intel_dp, DP_PHY_DPRX, "Channel EQ check failed. cannot continue POST_LT_ADJ_REQ\n");
+ break;
+ }
+
+ if (!drm_dp_post_lt_adj_req_in_progress(link_status)) {
+ success = true;
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ lt_dbg(intel_dp, DP_PHY_DPRX,
+ "POST_LT_ADJ_REQ done (%d changes). DP Training successful\n", changes);
+ break;
+ }
+
+ if (changes == 6) {
+ success = true;
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ lt_dbg(intel_dp, DP_PHY_DPRX,
+ "POST_LT_ADJ_REQ limit reached (%d changes). DP Training successful\n", changes);
+ break;
+ }
+
+ if (timeout) {
+ success = true;
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ lt_dbg(intel_dp, DP_PHY_DPRX,
+ "POST_LT_ADJ_REQ timeout reached (%d changes). DP Training successful\n", changes);
+ break;
+ }
+
+ fsleep(5000);
+
+ if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
+ link_status) < 0) {
+ lt_err(intel_dp, DP_PHY_DPRX, "Failed to get link status\n");
+ break;
+ }
+
+ /* Update training set as requested by target */
+ if (intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status)) {
+ deadline = jiffies + msecs_to_jiffies_timeout(200);
+ changes++;
+
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
+ lt_err(intel_dp, DP_PHY_DPRX, "Failed to update link training\n");
+ break;
+ }
+ } else if (time_after(jiffies, deadline)) {
+ timeout = true;
+ }
+ }
+
+ return success;
+}
+
+static void intel_dp_stop_post_lt_adj_req(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 lane_count;
+
+ if (!intel_dp_use_post_lt_adj_req(intel_dp, crtc_state))
+ return;
+
+ /* clear DP_POST_LT_ADJ_REQ_GRANTED */
+ lane_count = crtc_state->lane_count;
+ if (crtc_state->enhanced_framing)
+ lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
+}
+
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
@@ -1372,6 +1491,11 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
intel_dp->set_idle_link_train(intel_dp, crtc_state);
}
+ if (ret)
+ ret = intel_dp_post_lt_adj_req(intel_dp, crtc_state);
+
+ intel_dp_stop_post_lt_adj_req(intel_dp, crtc_state);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 1ba22ed6db08..33dcbde6a408 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -21,7 +21,7 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp,
int link_rate, bool is_vrr);
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
int link_bw, int rate_select, int lane_count,
- bool enhanced_framing);
+ bool enhanced_framing, bool post_lt_adj_req);
bool intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 167e4a70ab12..d937143ed10f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -2109,7 +2109,7 @@ void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
intel_dp_link_training_set_mode(intel_dp, link_rate, false);
intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
- drm_dp_enhanced_frame_cap(intel_dp->dpcd));
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd), false);
intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
}
--
2.45.3
next prev parent reply other threads:[~2025-02-24 17:27 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-24 17:26 [PATCH 0/9] drm/i915/dp: Implement POST_LT_ADJ_REQ Ville Syrjala
2025-02-24 17:26 ` [PATCH 1/9] drm/dp: Add definitions for POST_LT_ADJ training sequence Ville Syrjala
2025-02-25 15:55 ` Jani Nikula
2025-02-27 20:42 ` [PATCH v2 " Ville Syrjala
2025-06-16 16:28 ` Imre Deak
2025-02-24 17:26 ` [PATCH 2/9] drm/dp: Add POST_LT_ADJ_REQ helpers Ville Syrjala
2025-02-24 17:26 ` [PATCH 3/9] drm/i915/dp: Clear DPCD training pattern before transmitting the idle pattern Ville Syrjala
2025-02-24 17:26 ` [PATCH 4/9] drm/i915/dp: Have intel_dp_get_adjust_train() tell us if anything changed Ville Syrjala
2025-02-24 17:26 ` Ville Syrjala [this message]
2025-06-16 16:30 ` [PATCH 5/9] drm/i915/dp: Implement the POST_LT_ADJ_REQ sequence Imre Deak
2025-02-24 17:26 ` [PATCH 6/9] drm/i915/dp: Move intel_dp_training_pattern() Ville Syrjala
2025-02-24 17:26 ` [PATCH 7/9] drm/i915/dp: Implement .set_idle_link_train() for everyone Ville Syrjala
2025-06-16 16:31 ` Imre Deak
2025-02-24 17:26 ` [PATCH 8/9] drm/i915/dp: Make .set_idle_link_train() mandatory Ville Syrjala
2025-02-24 17:26 ` [PATCH 9/9] hax: drm/i915: Disable TPS4 support to force POST_LT_ADJ_REQ usage Ville Syrjala
2025-02-25 1:06 ` ✓ CI.Patch_applied: success for drm/i915/dp: Implement POST_LT_ADJ_REQ Patchwork
2025-02-25 1:06 ` ✗ CI.checkpatch: warning " Patchwork
2025-02-25 1:07 ` ✓ CI.KUnit: success " Patchwork
2025-02-25 1:24 ` ✓ CI.Build: " Patchwork
2025-02-25 1:26 ` ✓ CI.Hooks: " Patchwork
2025-02-25 1:28 ` ✗ CI.checksparse: warning " Patchwork
2025-02-25 1:46 ` ✓ Xe.CI.BAT: success " Patchwork
2025-02-25 4:34 ` ✗ Xe.CI.Full: failure " Patchwork
2025-02-27 22:35 ` ✓ CI.Patch_applied: success for drm/i915/dp: Implement POST_LT_ADJ_REQ (rev2) Patchwork
2025-02-27 22:35 ` ✗ CI.checkpatch: warning " Patchwork
2025-02-27 22:37 ` ✓ CI.KUnit: success " Patchwork
2025-02-27 22:53 ` ✓ CI.Build: " Patchwork
2025-02-27 22:56 ` ✓ CI.Hooks: " Patchwork
2025-02-27 22:57 ` ✗ CI.checksparse: warning " Patchwork
2025-02-27 23:15 ` ✓ Xe.CI.BAT: success " Patchwork
2025-02-28 3:23 ` ✗ Xe.CI.Full: failure " Patchwork
2025-06-16 16:25 ` [PATCH 0/9] drm/i915/dp: Implement POST_LT_ADJ_REQ Imre Deak
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250224172645.15763-6-ville.syrjala@linux.intel.com \
--to=ville.syrjala@linux.intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-gfx@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox