AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "Khatri, Sunil" <sunil.khatri@amd.com>
To: boyuan.zhang@amd.com, amd-gfx@lists.freedesktop.org,
	leo.liu@amd.com, christian.koenig@amd.com,
	alexander.deucher@amd.com
Subject: Re: [PATCH 28/32] drm/amdgpu: print_ip_state for each vcn instance
Date: Tue, 22 Oct 2024 14:10:27 +0530	[thread overview]
Message-ID: <385a987f-856f-0c80-705e-dd074c30d7f7@amd.com> (raw)
In-Reply-To: <20241017132053.53214-29-boyuan.zhang@amd.com>

[-- Attachment #1: Type: text/plain, Size: 17538 bytes --]

Reviewed-by: Sunil Khatri <sunil.khatri@amd.com 
<mailto:sunil.khatri@amd.com>>

On 10/17/2024 6:50 PM, boyuan.zhang@amd.com wrote:
> From: Boyuan Zhang <boyuan.zhang@amd.com>
>
> Perform print_ip_state only for the instance of the current vcn IP block,
> instead of performing it for all vcn instances.
>
> Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 33 ++++++++++++-------------
>   drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 33 ++++++++++++-------------
>   drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   | 33 ++++++++++++-------------
>   drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c   | 33 ++++++++++++-------------
>   drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c   | 33 ++++++++++++-------------
>   drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 33 ++++++++++++-------------
>   drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 33 ++++++++++++-------------
>   drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 33 ++++++++++++-------------
>   8 files changed, 128 insertions(+), 136 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> index ab06c4173b66..9255bcfc6c3d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> @@ -1933,7 +1933,8 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
>   static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
>   	uint32_t inst_off, is_powered;
>   
> @@ -1941,24 +1942,22 @@ static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> index 59c6458c99af..94f000ed4895 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> @@ -2041,7 +2041,8 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
>   static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
>   	uint32_t inst_off, is_powered;
>   
> @@ -2049,24 +2050,22 @@ static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
> index b0d14330722b..7c9a0169215e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
> @@ -1915,7 +1915,8 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev, int inst)
>   static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
>   	uint32_t inst_off, is_powered;
>   
> @@ -1923,24 +1924,22 @@ static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> index 9a9acec4c4ce..061c958700d8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> @@ -2237,7 +2237,8 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev, int inst)
>   static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
>   	uint32_t inst_off;
>   	bool is_powered;
> @@ -2246,24 +2247,22 @@ static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			  UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
> index ff256fb4d054..fdf346bf3e34 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
> @@ -2146,7 +2146,8 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev, int inst)
>   static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
>   	uint32_t inst_off, is_powered;
>   
> @@ -2154,24 +2155,22 @@ static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
> index 544c96e082b9..daaf2fb6b3e5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
> @@ -1726,7 +1726,8 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev, int inst)
>   static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
>   	uint32_t inst_off, is_powered;
>   
> @@ -1734,24 +1735,22 @@ static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct d
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
> index 880353f75296..ff8db22b9614 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
> @@ -1607,7 +1607,8 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev, int inst)
>   static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
>   	uint32_t inst_off, is_powered;
>   
> @@ -1615,24 +1616,22 @@ static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct d
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
> index a4d1b9a086b2..c83a5c09f410 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
> @@ -1334,7 +1334,8 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev, int inst)
>   static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
> -	int i, j;
> +	int inst = ip_block->instance;
> +	int j;
>   	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
>   	uint32_t inst_off, is_powered;
>   
> @@ -1342,24 +1343,22 @@ static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm
>   		return;
>   
>   	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
> -	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		if (adev->vcn.harvest_config & (1 << i)) {
> -			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
> -			continue;
> -		}
> +	if (adev->vcn.harvest_config & (1 << inst)) {
> +		drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", inst);
> +		return;
> +	}
>   
> -		inst_off = i * reg_count;
> -		is_powered = (adev->vcn.ip_dump[inst_off] &
> -				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
> +	inst_off = inst * reg_count;
> +	is_powered = (adev->vcn.ip_dump[inst_off] &
> +			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
>   
> -		if (is_powered) {
> -			drm_printf(p, "\nActive Instance:VCN%d\n", i);
> -			for (j = 0; j < reg_count; j++)
> -				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
> -					   adev->vcn.ip_dump[inst_off + j]);
> -		} else {
> -			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
> -		}
> +	if (is_powered) {
> +		drm_printf(p, "\nActive Instance:VCN%d\n", inst);
> +		for (j = 0; j < reg_count; j++)
> +			drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
> +				   adev->vcn.ip_dump[inst_off + j]);
> +	} else {
> +		drm_printf(p, "\nInactive Instance:VCN%d\n", inst);
>   	}
>   }
>   

[-- Attachment #2: Type: text/html, Size: 53862 bytes --]

  reply	other threads:[~2024-10-22  8:40 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-17 13:20 [PATCH 00/32] Separating vcn power management by instance boyuan.zhang
2024-10-17 13:20 ` [PATCH 01/32] drm/amd/pm: add inst to dpm_set_vcn_enable boyuan.zhang
2024-10-17 13:20 ` [PATCH 02/32] drm/amd/pm: power up or down vcn by instance boyuan.zhang
2024-10-17 13:20 ` [PATCH 03/32] drm/amd/pm: add inst to smu_dpm_set_vcn_enable boyuan.zhang
2024-10-17 13:20 ` [PATCH 04/32] drm/amd/pm: add inst to set_powergating_by_smu boyuan.zhang
2024-10-17 13:20 ` [PATCH 05/32] drm/amd/pm: add inst to dpm_set_powergating_by_smu boyuan.zhang
2024-10-17 13:20 ` [PATCH 06/32] add inst to amdgpu_dpm_enable_vcn boyuan.zhang
2024-10-17 13:20 ` [PATCH 07/32] drm/amdgpu: pass ip_block in set_powergating_state boyuan.zhang
2024-10-22  7:42   ` Khatri, Sunil
2024-10-25  2:46     ` Boyuan Zhang
2024-10-17 13:20 ` [PATCH 08/32] drm/amdgpu: pass ip_block in set_clockgating_state boyuan.zhang
2024-10-22  7:58   ` Khatri, Sunil
2024-10-25  2:48     ` Boyuan Zhang
2024-10-17 13:20 ` [PATCH 09/32] drm/amdgpu: track instances of the same IP block boyuan.zhang
2024-10-17 13:20 ` [PATCH 10/32] drm/amdgpu: move per inst variables to amdgpu_vcn_inst boyuan.zhang
2024-10-17 13:20 ` [PATCH 11/32] drm/amdgpu/vcn: separate gating state by instance boyuan.zhang
2024-10-17 13:20 ` [PATCH 12/32] drm/amdgpu: power vcn 2_5 " boyuan.zhang
2024-10-17 13:20 ` [PATCH 13/32] drm/amdgpu: power vcn 3_0 " boyuan.zhang
2024-10-17 13:20 ` [PATCH 14/32] drm/amdgpu: power vcn 4_0 " boyuan.zhang
2024-10-17 13:20 ` [PATCH 15/32] drm/amdgpu: power vcn 4_0_3 " boyuan.zhang
2024-10-17 13:20 ` [PATCH 16/32] drm/amdgpu: power vcn 4_0_5 " boyuan.zhang
2024-10-17 13:20 ` [PATCH 17/32] drm/amdgpu: power vcn 5_0_0 " boyuan.zhang
2024-10-17 13:20 ` [PATCH 18/32] drm/amdgpu/vcn: separate idle work " boyuan.zhang
2024-10-17 13:20 ` [PATCH 19/32] drm/amdgpu: set powergating state by vcn instance boyuan.zhang
2024-10-17 13:20 ` [PATCH 20/32] drm/amdgpu: early_init for each " boyuan.zhang
2024-10-17 13:20 ` [PATCH 21/32] drm/amdgpu: sw_init " boyuan.zhang
2024-10-17 13:20 ` [PATCH 22/32] drm/amdgpu: sw_fini " boyuan.zhang
2024-10-28 19:40   ` Alex Deucher
2024-10-17 13:20 ` [PATCH 23/32] drm/amdgpu: hw_init " boyuan.zhang
2024-10-17 13:20 ` [PATCH 24/32] drm/amdgpu: suspend " boyuan.zhang
2024-10-17 13:20 ` [PATCH 25/32] drm/amdgpu: resume " boyuan.zhang
2024-10-17 13:20 ` [PATCH 26/32] drm/amdgpu: setup_ucode " boyuan.zhang
2024-10-17 13:20 ` [PATCH 27/32] drm/amdgpu: set funcs " boyuan.zhang
2024-10-17 13:20 ` [PATCH 28/32] drm/amdgpu: print_ip_state " boyuan.zhang
2024-10-22  8:40   ` Khatri, Sunil [this message]
2024-10-17 13:20 ` [PATCH 29/32] drm/amdgpu: dump_ip_state " boyuan.zhang
2024-10-22  8:56   ` Khatri, Sunil
2024-10-22  8:59     ` Khatri, Sunil
2024-10-22 12:37       ` Khatri, Sunil
2024-10-17 13:20 ` [PATCH 30/32] drm/amdgpu: wait_for_idle " boyuan.zhang
2024-10-17 13:20 ` [PATCH 31/32] drm/amdgpu: is_idle " boyuan.zhang
2024-10-17 13:20 ` [PATCH 32/32] drm/amdgpu: set_powergating " boyuan.zhang
2024-10-22  6:25 ` [PATCH 00/32] Separating vcn power management by instance Christian König
2024-10-25  2:53   ` Boyuan Zhang
  -- strict thread matches above, loose matches on Subject: below --
2024-10-08 21:15 boyuan.zhang
2024-10-08 21:15 ` [PATCH 28/32] drm/amdgpu: print_ip_state for each vcn instance boyuan.zhang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=385a987f-856f-0c80-705e-dd074c30d7f7@amd.com \
    --to=sunil.khatri@amd.com \
    --cc=alexander.deucher@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=boyuan.zhang@amd.com \
    --cc=christian.koenig@amd.com \
    --cc=leo.liu@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox