From: "Christian König" <ckoenig.leichtzumerken@gmail.com>
To: boyuan.zhang@amd.com, amd-gfx@lists.freedesktop.org,
leo.liu@amd.com, christian.koenig@amd.com,
alexander.deucher@amd.com, sunil.khatri@amd.com
Subject: Re: [PATCH 02/32] drm/amd/pm: power up or down vcn by instance
Date: Wed, 9 Oct 2024 13:37:50 +0200 [thread overview]
Message-ID: <52c6f4ca-0e33-47a5-8bac-fe51ba27ab5f@gmail.com> (raw)
In-Reply-To: <20241008211553.36264-3-boyuan.zhang@amd.com>
Am 08.10.24 um 23:15 schrieb boyuan.zhang@amd.com:
> From: Boyuan Zhang <boyuan.zhang@amd.com>
>
> For smu ip with multiple vcn instances (smu 11/13/14), remove all the
> for loop in dpm_set_vcn_enable() functions. And use the instance
> argument to power up/down vcn for the given instance only, instead
> of powering up/down for all vcn instances.
>
> v2: remove all duplicated functions in v1.
>
> remove for-loop from each ip, and temporarily move to dpm_set_vcn_enable,
> in order to keep the exact same logic as before, until further separation
> in the next patch.
>
> Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
> ---
> drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 7 +++-
> .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 20 +++++------
> .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 16 ++++-----
> .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 35 ++++++++-----------
> 4 files changed, 35 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 6305fd7d2573..410d5baedfbc 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -237,6 +237,7 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
> {
> struct smu_power_context *smu_power = &smu->smu_power;
> struct smu_power_gate *power_gate = &smu_power->power_gate;
> + struct amdgpu_device *adev = smu->adev;
> int ret = 0;
>
> /*
> @@ -251,7 +252,11 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
> if (atomic_read(&power_gate->vcn_gated) ^ enable)
> return 0;
>
> - ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff);
> + for (int i = 0; i < adev->vcn.num_vcn_inst; i++) {
> + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, i);
> + if (ret)
> + return ret;
> + }
> if (!ret)
I think you should be able to drop this if (!ret) now.
Apart from that, it looks really good,
Christian.
> atomic_set(&power_gate->vcn_gated, !enable);
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 2438f813d6db..f41d8fd09bce 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -1157,19 +1157,15 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu,
> int inst)
> {
> struct amdgpu_device *adev = smu->adev;
> - int i, ret = 0;
> + int ret = 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> - /* vcn dpm on is a prerequisite for vcn power gate messages */
> - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> - 0x10000 * i, NULL);
> - if (ret)
> - return ret;
> - }
> + if (adev->vcn.harvest_config & (1 << inst))
> + return ret;
> + /* vcn dpm on is a prerequisite for vcn power gate messages */
> + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> + 0x10000 * inst, NULL);
> }
>
> return ret;
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
> index 1d37b9e251d4..e57d4926250e 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
> @@ -2092,18 +2092,14 @@ int smu_v13_0_set_vcn_enable(struct smu_context *smu,
> int inst)
> {
> struct amdgpu_device *adev = smu->adev;
> - int i, ret = 0;
> + int ret = 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> + if (adev->vcn.harvest_config & (1 << inst))
> + return ret;
>
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> - i << 16U, NULL);
> - if (ret)
> - return ret;
> - }
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> + inst << 16U, NULL);
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
> index 5e1165ecdf7b..20822ac623c7 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
> @@ -1496,29 +1496,24 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
> int inst)
> {
> struct amdgpu_device *adev = smu->adev;
> - int i, ret = 0;
> + int ret = 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> + if (adev->vcn.harvest_config & (1 << inst))
> + return ret;
>
> - if (smu->is_apu) {
> - if (i == 0)
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
> - i << 16U, NULL);
> - else if (i == 1)
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
> - i << 16U, NULL);
> - } else {
> + if (smu->is_apu) {
> + if (inst == 0)
> ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> - i << 16U, NULL);
> - }
> -
> - if (ret)
> - return ret;
> + SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
> + inst << 16U, NULL);
> + else if (inst == 1)
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
> + inst << 16U, NULL);
> + } else {
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> + inst << 16U, NULL);
> }
>
> return ret;
next prev parent reply other threads:[~2024-10-09 11:37 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-10-08 21:15 [PATCH 00/32] Separating vcn power management by instance boyuan.zhang
2024-10-08 21:15 ` [PATCH 01/32] drm/amd/pm: add inst to dpm_set_vcn_enable boyuan.zhang
2024-10-08 21:15 ` [PATCH 02/32] drm/amd/pm: power up or down vcn by instance boyuan.zhang
2024-10-09 11:37 ` Christian König [this message]
2024-10-08 21:15 ` [PATCH 03/32] drm/amd/pm: add inst to smu_dpm_set_vcn_enable boyuan.zhang
2024-10-08 21:15 ` [PATCH 04/32] drm/amd/pm: add inst to set_powergating_by_smu boyuan.zhang
2024-10-08 21:15 ` [PATCH 05/32] drm/amd/pm: add inst to dpm_set_powergating_by_smu boyuan.zhang
2024-10-08 21:15 ` [PATCH 06/32] add inst to amdgpu_dpm_enable_vcn boyuan.zhang
2024-10-08 21:15 ` [PATCH 07/32] drm/amdgpu: pass ip_block in set_powergating_state boyuan.zhang
2024-10-08 21:15 ` [PATCH 08/32] drm/amdgpu: pass ip_block in set_clockgating_state boyuan.zhang
2024-10-08 21:15 ` [PATCH 09/32] drm/amdgpu: track instances of the same IP block boyuan.zhang
2024-10-08 21:15 ` [PATCH 10/32] drm/amdgpu: move per inst variables to amdgpu_vcn_inst boyuan.zhang
2024-10-08 21:15 ` [PATCH 11/32] drm/amdgpu/vcn: separate gating state by instance boyuan.zhang
2024-10-08 21:15 ` [PATCH 12/32] drm/amdgpu: power vcn 2_5 " boyuan.zhang
2024-10-08 21:15 ` [PATCH 13/32] drm/amdgpu: power vcn 3_0 " boyuan.zhang
2024-10-08 21:15 ` [PATCH 14/32] drm/amdgpu: power vcn 4_0 " boyuan.zhang
2024-10-08 21:15 ` [PATCH 15/32] drm/amdgpu: power vcn 4_0_3 " boyuan.zhang
2024-10-08 21:15 ` [PATCH 16/32] drm/amdgpu: power vcn 4_0_5 " boyuan.zhang
2024-10-08 21:15 ` [PATCH 17/32] drm/amdgpu: power vcn 5_0_0 " boyuan.zhang
2024-10-08 21:15 ` [PATCH 18/32] drm/amdgpu/vcn: separate idle work " boyuan.zhang
2024-10-08 21:15 ` [PATCH 19/32] drm/amdgpu: set powergating state by vcn instance boyuan.zhang
2024-10-08 21:15 ` [PATCH 20/32] drm/amdgpu: early_init for each " boyuan.zhang
2024-10-08 21:15 ` [PATCH 21/32] drm/amdgpu: sw_init " boyuan.zhang
2024-10-08 21:15 ` [PATCH 22/32] drm/amdgpu: sw_fini " boyuan.zhang
2024-10-08 21:15 ` [PATCH 23/32] drm/amdgpu: hw_init " boyuan.zhang
2024-10-08 21:15 ` [PATCH 24/32] drm/amdgpu: suspend " boyuan.zhang
2024-10-08 21:15 ` [PATCH 25/32] drm/amdgpu: resume " boyuan.zhang
2024-10-08 21:15 ` [PATCH 26/32] drm/amdgpu: setup_ucode " boyuan.zhang
2024-10-08 21:15 ` [PATCH 27/32] drm/amdgpu: set funcs " boyuan.zhang
2024-10-08 21:15 ` [PATCH 28/32] drm/amdgpu: print_ip_state " boyuan.zhang
2024-10-08 21:15 ` [PATCH 29/32] drm/amdgpu: dump_ip_state " boyuan.zhang
2024-10-08 21:15 ` [PATCH 30/32] drm/amdgpu: wait_for_idle " boyuan.zhang
2024-10-08 21:15 ` [PATCH 31/32] drm/amdgpu: is_idle " boyuan.zhang
2024-10-08 21:15 ` [PATCH 32/32] drm/amdgpu: set_powergating " boyuan.zhang
-- strict thread matches above, loose matches on Subject: below --
2024-10-17 13:20 [PATCH 00/32] Separating vcn power management by instance boyuan.zhang
2024-10-17 13:20 ` [PATCH 02/32] drm/amd/pm: power up or down vcn " boyuan.zhang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=52c6f4ca-0e33-47a5-8bac-fe51ba27ab5f@gmail.com \
--to=ckoenig.leichtzumerken@gmail.com \
--cc=alexander.deucher@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=boyuan.zhang@amd.com \
--cc=christian.koenig@amd.com \
--cc=leo.liu@amd.com \
--cc=sunil.khatri@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox