From: "Lisovskiy, Stanislav" <stanislav.lisovskiy@intel.com>
To: Ville Syrjala <ville.syrjala@linux.intel.com>
Cc: intel-gfx@lists.freedesktop.org
Subject: Re: [Intel-gfx] [PATCH 5/6] drm/i915: Store plane relative data rate in crtc_state
Date: Fri, 13 Nov 2020 17:26:54 +0200 [thread overview]
Message-ID: <20201113152654.GB17194@intel.com> (raw)
In-Reply-To: <20201106173042.7534-6-ville.syrjala@linux.intel.com>
On Fri, Nov 06, 2020 at 07:30:41PM +0200, Ville Syrjala wrote:
> From: Ville Syrjälä <ville.syrjala@linux.intel.com>
>
> Store the relative data rate for planes in the crtc state
> so that we don't have to use
> intel_atomic_crtc_state_for_each_plane_state() to compute
> it even for the planes that are not part of the current state.
>
> Should probably just nuke this stuff entirely and use the normal
> plane data rate instead. The two are slightly different since this
> relative data rate doesn't factor in the actual pixel clock, so
> it's a bit odd thing to even call a "data rate". And since the
> watermarks are computed based on the actual data rate anyway
> I don't really see what the point of this relative data rate
> is. But that's for the future...
>
> Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> ---
> .../drm/i915/display/intel_display_types.h | 4 +
> drivers/gpu/drm/i915/intel_pm.c | 83 ++++++++++---------
> 2 files changed, 50 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
> index 8a0276044832..768bd3dc77dc 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_types.h
> +++ b/drivers/gpu/drm/i915/display/intel_display_types.h
> @@ -1024,6 +1024,10 @@ struct intel_crtc_state {
>
> u32 data_rate[I915_MAX_PLANES];
>
> + /* FIXME unify with data_rate[] */
> + u64 plane_data_rate[I915_MAX_PLANES];
> + u64 uv_plane_data_rate[I915_MAX_PLANES];
> +
> /* Gamma mode programmed on the pipe */
> u32 gamma_mode;
>
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index b789ad78319b..8865f37d6297 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -4696,50 +4696,63 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
> }
>
> static u64
> -skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
> - u64 *plane_data_rate,
> - u64 *uv_plane_data_rate)
> +skl_get_total_relative_data_rate(struct intel_atomic_state *state,
> + struct intel_crtc *crtc)
> {
> - struct intel_plane *plane;
> + struct intel_crtc_state *crtc_state =
> + intel_atomic_get_new_crtc_state(state, crtc);
> const struct intel_plane_state *plane_state;
> + struct intel_plane *plane;
> u64 total_data_rate = 0;
> + enum plane_id plane_id;
> + int i;
>
> /* Calculate and cache data rate for each plane */
> - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
> - enum plane_id plane_id = plane->id;
> - u64 rate;
> + for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
> + if (plane->pipe != crtc->pipe)
> + continue;
> +
> + plane_id = plane->id;
>
> /* packed/y */
> - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
> - plane_data_rate[plane_id] = rate;
> - total_data_rate += rate;
> + crtc_state->plane_data_rate[plane_id] =
> + skl_plane_relative_data_rate(crtc_state, plane_state, 0);
>
> /* uv-plane */
> - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
> - uv_plane_data_rate[plane_id] = rate;
> - total_data_rate += rate;
> + crtc_state->uv_plane_data_rate[plane_id] =
> + skl_plane_relative_data_rate(crtc_state, plane_state, 1);
> + }
> +
> + for_each_plane_id_on_crtc(crtc, plane_id) {
> + total_data_rate += crtc_state->plane_data_rate[plane_id];
> + total_data_rate += crtc_state->uv_plane_data_rate[plane_id];
> }
>
> return total_data_rate;
> }
>
> static u64
> -icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
> - u64 *plane_data_rate)
> +icl_get_total_relative_data_rate(struct intel_atomic_state *state,
> + struct intel_crtc *crtc)
> {
> - struct intel_plane *plane;
> + struct intel_crtc_state *crtc_state =
> + intel_atomic_get_new_crtc_state(state, crtc);
> const struct intel_plane_state *plane_state;
> + struct intel_plane *plane;
> u64 total_data_rate = 0;
> + enum plane_id plane_id;
> + int i;
>
> /* Calculate and cache data rate for each plane */
> - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
> - enum plane_id plane_id = plane->id;
> - u64 rate;
> + for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
> + if (plane->pipe != crtc->pipe)
> + continue;
> +
> + plane_id = plane->id;
>
> if (!plane_state->planar_linked_plane) {
> - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
> - plane_data_rate[plane_id] = rate;
> - total_data_rate += rate;
> + crtc_state->plane_data_rate[plane_id] =
> + skl_plane_relative_data_rate(crtc_state, plane_state, 0);
> } else {
> enum plane_id y_plane_id;
>
> @@ -4754,17 +4767,18 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
> continue;
>
> /* Y plane rate is calculated on the slave */
> - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
> y_plane_id = plane_state->planar_linked_plane->id;
> - plane_data_rate[y_plane_id] = rate;
> - total_data_rate += rate;
> + crtc_state->plane_data_rate[y_plane_id] =
> + skl_plane_relative_data_rate(crtc_state, plane_state, 0);
>
> - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
> - plane_data_rate[plane_id] = rate;
> - total_data_rate += rate;
> + crtc_state->plane_data_rate[plane_id] =
> + skl_plane_relative_data_rate(crtc_state, plane_state, 1);
> }
> }
>
> + for_each_plane_id_on_crtc(crtc, plane_id)
> + total_data_rate += crtc_state->plane_data_rate[plane_id];
> +
> return total_data_rate;
> }
>
> @@ -4796,8 +4810,6 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
> u64 total_data_rate;
> enum plane_id plane_id;
> int num_active;
> - u64 plane_data_rate[I915_MAX_PLANES] = {};
> - u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
> u32 blocks;
> int level;
> int ret;
> @@ -4837,13 +4849,10 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
>
> if (INTEL_GEN(dev_priv) >= 11)
> total_data_rate =
> - icl_get_total_relative_data_rate(crtc_state,
> - plane_data_rate);
> + icl_get_total_relative_data_rate(state, crtc);
> else
> total_data_rate =
> - skl_get_total_relative_data_rate(crtc_state,
> - plane_data_rate,
> - uv_plane_data_rate);
> + skl_get_total_relative_data_rate(state, crtc);
>
> ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
> total_data_rate,
> @@ -4924,7 +4933,7 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
> if (total_data_rate == 0)
> break;
>
> - rate = plane_data_rate[plane_id];
> + rate = crtc_state->plane_data_rate[plane_id];
> extra = min_t(u16, alloc_size,
> DIV64_U64_ROUND_UP(alloc_size * rate,
> total_data_rate));
> @@ -4935,7 +4944,7 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
> if (total_data_rate == 0)
> break;
>
> - rate = uv_plane_data_rate[plane_id];
> + rate = crtc_state->uv_plane_data_rate[plane_id];
> extra = min_t(u16, alloc_size,
> DIV64_U64_ROUND_UP(alloc_size * rate,
> total_data_rate));
> --
> 2.26.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2020-11-13 15:26 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-11-06 17:30 [Intel-gfx] [PATCH 0/6] drm/i915: Eliminate intel_atomic_crtc_state_for_each_plane_state() from skl+ wm code Ville Syrjala
2020-11-06 17:30 ` [Intel-gfx] [PATCH 1/6] drm/i915: Pass intel_atomic_state around Ville Syrjala
2020-11-09 21:47 ` Navare, Manasi
2020-11-06 17:30 ` [Intel-gfx] [PATCH 2/6] drm/i915: Nuke intel_atomic_crtc_state_for_each_plane_state() from skl+ wm code Ville Syrjala
2020-11-11 22:22 ` Lisovskiy, Stanislav
2020-11-06 17:30 ` [Intel-gfx] [PATCH 3/6] drm/i915: Pimp the watermark documentation a bit Ville Syrjala
2020-11-11 22:51 ` Navare, Manasi
2020-11-06 17:30 ` [Intel-gfx] [PATCH 4/6] drm/i915: Precompute can_sagv for each wm level Ville Syrjala
2020-11-12 13:59 ` Lisovskiy, Stanislav
2020-11-13 14:55 ` Ville Syrjälä
2020-11-06 17:30 ` [Intel-gfx] [PATCH 5/6] drm/i915: Store plane relative data rate in crtc_state Ville Syrjala
2020-11-13 15:26 ` Lisovskiy, Stanislav [this message]
2020-11-06 17:30 ` [Intel-gfx] [PATCH 6/6] drm/i915: Remove skl_adjusted_plane_pixel_rate() Ville Syrjala
2020-11-13 15:24 ` Lisovskiy, Stanislav
2020-11-06 18:04 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for drm/i915: Eliminate intel_atomic_crtc_state_for_each_plane_state() from skl+ wm code Patchwork
2020-11-06 18:34 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2020-11-13 21:40 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for drm/i915: Eliminate intel_atomic_crtc_state_for_each_plane_state() from skl+ wm code (rev2) Patchwork
2020-11-13 22:09 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-11-14 2:01 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20201113152654.GB17194@intel.com \
--to=stanislav.lisovskiy@intel.com \
--cc=intel-gfx@lists.freedesktop.org \
--cc=ville.syrjala@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox