From: Ville Syrjala <ville.syrjala@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 2/2] drm/i915: Refactor intel_dp_compute_link_config_*()
Date: Thu, 7 Jan 2021 20:20:26 +0200 [thread overview]
Message-ID: <20210107182026.24848-2-ville.syrjala@linux.intel.com> (raw)
In-Reply-To: <20210107182026.24848-1-ville.syrjala@linux.intel.com>
From: Ville Syrjälä <ville.syrjala@linux.intel.com>
Pull the common parts of intel_dp_compute_link_config_wide()
and intel_dp_compute_link_config_fast() into a shared helper
to avoid duplicated code.
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
drivers/gpu/drm/i915/display/intel_dp.c | 74 ++++++++++++++-----------
1 file changed, 43 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 57c2140c1316..d682cf57e455 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2259,34 +2259,47 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
}
}
+static bool
+intel_dp_link_config_valid(const struct intel_crtc_state *crtc_state,
+ int bpp, int link_clock, int lane_count)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int output_bpp = intel_dp_output_bpp(crtc_state->output_format, bpp);
+ int mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ output_bpp);
+ int link_avail = intel_dp_max_data_rate(link_clock, lane_count);
+
+ return mode_rate <= link_avail;
+}
+
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
const struct link_config_limits *limits)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
+ int bpp;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+ int clock;
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- output_bpp);
+ for (clock = limits->min_clock;
+ clock <= limits->max_clock;
+ clock++) {
+ int lane_count;
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
+ int link_clock = intel_dp->common_rates[clock];
- if (mode_rate <= link_avail) {
- pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
+ if (intel_dp_link_config_valid(crtc_state, bpp,
+ link_clock,
+ lane_count)) {
+ crtc_state->pipe_bpp = bpp;
+ crtc_state->port_clock = link_clock;
+ crtc_state->lane_count = lane_count;
return 0;
}
@@ -2300,31 +2313,30 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
/* Optimize link config in order: max bpp, min lanes, min clock */
static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
const struct link_config_limits *limits)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
+ int bpp;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
-
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- output_bpp);
+ int lane_count;
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
+ int clock;
- if (mode_rate <= link_avail) {
- pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
+ for (clock = limits->min_clock;
+ clock <= limits->max_clock;
+ clock++) {
+ int link_clock = intel_dp->common_rates[clock];
+
+ if (intel_dp_link_config_valid(crtc_state, bpp,
+ link_clock,
+ lane_count)) {
+ crtc_state->pipe_bpp = bpp;
+ crtc_state->port_clock = link_clock;
+ crtc_state->lane_count = lane_count;
return 0;
}
--
2.26.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2021-01-07 18:20 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-01-07 18:20 [Intel-gfx] [PATCH 1/2] drm/i915: Try to use fast+narrow link on eDP again and fall back to the old max strategy on failure Ville Syrjala
2021-01-07 18:20 ` Ville Syrjala [this message]
2021-01-07 19:38 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [1/2] " Patchwork
2021-01-07 20:06 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-01-08 1:25 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
2021-01-11 18:28 ` [Intel-gfx] [PATCH 1/2] " Ville Syrjälä
2021-05-10 8:07 ` Emanuele Panigati
2021-05-10 18:06 ` Albert Astals Cid
2021-05-11 3:29 ` Kai-Heng Feng
2021-05-11 12:57 ` Jani Nikula
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210107182026.24848-2-ville.syrjala@linux.intel.com \
--to=ville.syrjala@linux.intel.com \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox