From: Imre Deak <imre.deak@intel.com>
To: <intel-gfx@lists.freedesktop.org>, <intel-xe@lists.freedesktop.org>
Subject: [PATCH 4/5] drm/i915/dp: Verify branch devices' overall pixel throughput/line width
Date: Fri, 19 Sep 2025 00:12:22 +0300 [thread overview]
Message-ID: <20250918211223.209674-5-imre.deak@intel.com> (raw)
In-Reply-To: <20250918211223.209674-1-imre.deak@intel.com>
Read out the branch devices' maximum overall DSC pixel throughput and
maximum line width, and verify the mode's corresponding pixel clock and
hactive period against these limits.
Signed-off-by: Imre Deak <imre.deak@intel.com>
---
.../drm/i915/display/intel_display_types.h | 8 +++
drivers/gpu/drm/i915/display/intel_dp.c | 62 +++++++++++++++++++
2 files changed, 70 insertions(+)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 358ab922d7a76..73bdafae604f8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -552,6 +552,14 @@ struct intel_connector {
u8 dsc_hblank_expansion_quirk:1;
u8 dsc_decompression_enabled:1;
+
+ struct {
+ struct {
+ int rgb_yuv444;
+ int yuv422_420;
+ } overall_throughput;
+ int max_line_width;
+ } dsc_branch_caps;
} dp;
struct {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 8cc123b0fd752..dd082d2fcc968 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1042,6 +1042,20 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int tp_yuv422_420;
u8 val;
+ /*
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output.
+ * The RGB/YUV444 throughput value should be always either equal
+ * or smaller than the YUV422/420 value, but let's not depend on
+ * this assumption.
+ */
+ if (mode_clock > max(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420))
+ return 0;
+
+ if (mode_hdisplay > connector->dp.dsc_branch_caps.max_line_width)
+ return 0;
+
val = connector->dp.dsc_dpcd[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
tp_rgb_yuv444 = dsc_per_slice_throughput(display, mode_clock,
REG_FIELD_GET8(DP_DSC_THROUGHPUT_MODE_0_MASK,
@@ -4204,6 +4218,44 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
dsc_dpcd);
}
+static int dsc_branch_overall_throughput(u8 bw_code)
+{
+ switch (bw_code) {
+ case 0:
+ return INT_MAX;
+ case 1:
+ return 680000;
+ default:
+ return 600000 + 50000 * bw_code;
+ }
+}
+
+static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch)
+{
+ u8 branch_caps[3];
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = INT_MAX;
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = INT_MAX;
+ connector->dp.dsc_branch_caps.max_line_width = INT_MAX;
+
+ if (!is_branch)
+ return;
+
+ if (drm_dp_dpcd_read_data(connector->dp.dsc_decompression_aux,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, branch_caps,
+ sizeof(branch_caps)) != 0)
+ return;
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 =
+ dsc_branch_overall_throughput(branch_caps[0]);
+
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 =
+ dsc_branch_overall_throughput(branch_caps[1]);
+
+ if (branch_caps[2] != 0)
+ connector->dp.dsc_branch_caps.max_line_width = branch_caps[2] * 320;
+}
+
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
const struct drm_dp_desc *desc, bool is_branch,
struct intel_connector *connector)
@@ -4219,6 +4271,8 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
/* Clear fec_capable to avoid using stale values */
connector->dp.fec_capability = 0;
+ memset(&connector->dp.dsc_branch_caps, 0, sizeof(connector->dp.dsc_branch_caps));
+
if (dpcd_rev < DP_DPCD_REV_14)
return;
@@ -4233,6 +4287,11 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n",
connector->dp.fec_capability);
+
+ if (!(connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
+ return;
+
+ init_dsc_overall_throughput_limits(connector, is_branch);
}
static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
@@ -4241,6 +4300,9 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
return;
intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
+
+ if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)
+ init_dsc_overall_throughput_limits(connector, false);
}
static void
--
2.49.1
next prev parent reply other threads:[~2025-09-18 21:12 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-18 21:12 [PATCH 0/5] drm/i915/dp: Work around a DSC pixel throughput issue Imre Deak
2025-09-18 21:12 ` [PATCH 1/5] drm/dp: Add quirk for Synaptics DSC throughput link-bpp limit Imre Deak
2025-09-20 17:18 ` kernel test robot
2025-09-22 13:46 ` [PATCH v2 " Imre Deak
2025-09-18 21:12 ` [PATCH 2/5] drm/i915/dp: Calculate DSC slice count based on per-slice peak throughput Imre Deak
2025-09-22 19:34 ` Ville Syrjälä
2025-09-23 11:47 ` Imre Deak
2025-09-18 21:12 ` [PATCH 3/5] drm/i915/dp: Pass DPCD device descriptor to intel_dp_get_dsc_sink_cap() Imre Deak
2025-09-18 21:12 ` Imre Deak [this message]
2025-09-22 19:35 ` [PATCH 4/5] drm/i915/dp: Verify branch devices' overall pixel throughput/line width Ville Syrjälä
2025-09-22 20:06 ` Ville Syrjälä
2025-09-23 12:00 ` Imre Deak
2025-09-18 21:12 ` [PATCH 5/5] drm/i915/dp: Handle Synaptics DSC throughput link-bpp quirk Imre Deak
2025-09-22 13:46 ` [PATCH v2 " Imre Deak
2025-09-22 20:17 ` Ville Syrjälä
2025-09-23 12:08 ` Imre Deak
2025-09-25 22:08 ` [PATCH v4 " Imre Deak
2025-09-18 23:32 ` ✓ i915.CI.BAT: success for drm/i915/dp: Work around a DSC pixel throughput issue Patchwork
2025-09-19 21:10 ` ✓ i915.CI.Full: " Patchwork
2025-09-22 14:32 ` ✓ i915.CI.BAT: success for drm/i915/dp: Work around a DSC pixel throughput issue (rev3) Patchwork
2025-09-22 16:19 ` ✗ i915.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250918211223.209674-5-imre.deak@intel.com \
--to=imre.deak@intel.com \
--cc=intel-gfx@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox