AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Alex Deucher <alexander.deucher@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Likun Gao <Likun.Gao@amd.com>,
	Hawking Zhang <Hawking.Zhang@amd.com>,
	"Alex Deucher" <alexander.deucher@amd.com>
Subject: [PATCH] drm/amdgpu: Correct inst_id input from physical to logic
Date: Wed, 10 Dec 2025 02:14:07 -0500	[thread overview]
Message-ID: <20251210071415.19983-13-alexander.deucher@amd.com> (raw)
In-Reply-To: <20251210071415.19983-1-alexander.deucher@amd.com>

From: Likun Gao <Likun.Gao@amd.com>

Correct inst_id input from physical to logical for sdma v7_1.
V2: Show the real instance number on the logical xcc.

Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c | 50 +++++++++++++-------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
index 446b7527f5c62..e3963675bfac0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
@@ -369,7 +369,7 @@ static void sdma_v7_1_inst_gfx_stop(struct amdgpu_device *adev,
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	for_each_inst(i, inst_mask) {
+	for (i = 0; i < NUM_XCC(inst_mask); i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_1_get_reg_offset(adev, i, regSDMA0_SDMA_QUEUE0_RB_CNTL));
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_SDMA_QUEUE0_RB_CNTL, RB_ENABLE, 0);
 		WREG32_SOC15_IP(GC, sdma_v7_1_get_reg_offset(adev, i, regSDMA0_SDMA_QUEUE0_RB_CNTL), rb_cntl);
@@ -436,7 +436,7 @@ static void sdma_v7_1_inst_enable(struct amdgpu_device *adev,
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	for_each_inst(i, inst_mask) {
+	for (i = 0; i < NUM_XCC(inst_mask); i++) {
 		mcu_cntl = RREG32_SOC15_IP(GC, sdma_v7_1_get_reg_offset(adev, i, regSDMA0_SDMA_MCU_CNTL));
 		mcu_cntl = REG_SET_FIELD(mcu_cntl, SDMA0_SDMA_MCU_CNTL, HALT, enable ? 0 : 1);
 		WREG32_SOC15_IP(GC, sdma_v7_1_get_reg_offset(adev, i, regSDMA0_SDMA_MCU_CNTL), mcu_cntl);
@@ -617,7 +617,7 @@ static int sdma_v7_1_inst_gfx_resume(struct amdgpu_device *adev,
 {
 	int i, r;
 
-	for_each_inst(i, inst_mask) {
+	for (i = 0; i < NUM_XCC(inst_mask); i++) {
 		r = sdma_v7_1_gfx_resume_instance(adev, i, false);
 		if (r)
 			return r;
@@ -647,7 +647,7 @@ static void sdma_v7_1_inst_free_ucode_buffer(struct amdgpu_device *adev,
 {
 	int i;
 
-	for_each_inst(i, inst_mask) {
+	for (i = 0; i < NUM_XCC(inst_mask); i++) {
 		amdgpu_bo_free_kernel(&adev->sdma.instance[i].sdma_fw_obj,
 				      &adev->sdma.instance[i].sdma_fw_gpu_addr,
 				      (void **)&adev->sdma.instance[i].sdma_fw_ptr);
@@ -686,7 +686,7 @@ static int sdma_v7_1_inst_load_microcode(struct amdgpu_device *adev,
 			le32_to_cpu(hdr->ucode_offset_bytes));
 	fw_size = le32_to_cpu(hdr->ucode_size_bytes);
 
-	for_each_inst(i, inst_mask) {
+	for (i = 0; i < NUM_XCC(inst_mask); i++) {
 		r = amdgpu_bo_create_reserved(adev, fw_size,
 					      PAGE_SIZE,
 					      AMDGPU_GEM_DOMAIN_VRAM,
@@ -744,10 +744,10 @@ static int sdma_v7_1_soft_reset(struct amdgpu_ip_block *ip_block)
 	u32 tmp;
 	int i;
 
-	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
+	inst_mask = adev->sdma.sdma_mask;
 	sdma_v7_1_inst_gfx_stop(adev, inst_mask);
 
-	for_each_inst(i, inst_mask) {
+	for (i = 0; i < NUM_XCC(inst_mask); i++) {
 		//tmp = RREG32_SOC15_IP(GC, sdma_v7_1_get_reg_offset(adev, i, regSDMA0_SDMA_FREEZE));
 		//tmp |= SDMA0_SDMA_FREEZE__FREEZE_MASK;
 		//WREG32_SOC15_IP(GC, sdma_v7_1_get_reg_offset(adev, i, regSDMA0_SDMA_FREEZE), tmp);
@@ -1288,10 +1288,14 @@ static int sdma_v7_1_sw_init(struct amdgpu_ip_block *ip_block)
 		ring->ring_obj = NULL;
 		ring->use_doorbell = true;
 		ring->me = i;
-		xcc_id = adev->sdma.instance[i].xcc_id;
+
+		for (xcc_id = 0; xcc_id < fls(adev->gfx.xcc_mask); xcc_id++) {
+			if (adev->sdma.instance[i].xcc_id == GET_INST(GC, xcc_id))
+				break;
+		}
 
 		DRM_DEBUG("SDMA%d.%d use_doorbell being set to: [%s]\n",
-				xcc_id, i % adev->sdma.num_inst_per_xcc,
+				xcc_id, GET_INST(SDMA0, i) % adev->sdma.num_inst_per_xcc,
 				ring->use_doorbell?"true":"false");
 
 		ring->doorbell_index =
@@ -1299,7 +1303,7 @@ static int sdma_v7_1_sw_init(struct amdgpu_ip_block *ip_block)
 
 		ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
 		sprintf(ring->name, "sdma%d.%d", xcc_id,
-				i % adev->sdma.num_inst_per_xcc);
+				GET_INST(SDMA0, i) % adev->sdma.num_inst_per_xcc);
 		r = amdgpu_ring_init(adev, ring, 1024,
 				     &adev->sdma.trap_irq,
 				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
@@ -1334,11 +1338,8 @@ static int sdma_v7_1_sw_init(struct amdgpu_ip_block *ip_block)
 static int sdma_v7_1_sw_fini(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
-	uint32_t inst_mask;
 	int i;
 
-	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
-
 	for (i = 0; i < adev->sdma.num_instances; i++)
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
@@ -1346,7 +1347,7 @@ static int sdma_v7_1_sw_fini(struct amdgpu_ip_block *ip_block)
 	amdgpu_sdma_destroy_inst_ctx(adev, true);
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
-		sdma_v7_1_inst_free_ucode_buffer(adev, inst_mask);
+		sdma_v7_1_inst_free_ucode_buffer(adev, adev->sdma.sdma_mask);
 
 	kfree(adev->sdma.ip_dump);
 
@@ -1356,24 +1357,19 @@ static int sdma_v7_1_sw_fini(struct amdgpu_ip_block *ip_block)
 static int sdma_v7_1_hw_init(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
-	uint32_t inst_mask;
-
-	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
 
-	return sdma_v7_1_inst_start(adev, inst_mask);
+	return sdma_v7_1_inst_start(adev, adev->sdma.sdma_mask);
 }
 
 static int sdma_v7_1_hw_fini(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
-	uint32_t inst_mask;
 
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
-	sdma_v7_1_inst_ctx_switch_enable(adev, false, inst_mask);
-	sdma_v7_1_inst_enable(adev, false, inst_mask);
+	sdma_v7_1_inst_ctx_switch_enable(adev, false, adev->sdma.sdma_mask);
+	sdma_v7_1_inst_enable(adev, false, adev->sdma.sdma_mask);
 
 	return 0;
 }
@@ -1493,7 +1489,7 @@ static int sdma_v7_1_process_trap_irq(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
 {
-	int instances, queue, xcc_id = 0;
+	int inst, instances, queue, xcc_id = 0;
 	uint32_t mes_queue_id = entry->src_data[0];
 
 	DRM_DEBUG("IH: SDMA trap\n");
@@ -1518,8 +1514,12 @@ static int sdma_v7_1_process_trap_irq(struct amdgpu_device *adev,
 		xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev, entry->node_id);
 	else
 		dev_warn(adev->dev, "IH: SDMA may get wrong xcc id as gfx function not available\n");
-	instances = ((entry->ring_id & 0xf0) >> 4) +
-		xcc_id * adev->sdma.num_inst_per_xcc;
+	inst = ((entry->ring_id & 0xf0) >> 4) +
+		GET_INST(GC, xcc_id) * adev->sdma.num_inst_per_xcc;
+	for (instances = 0; instances < adev->sdma.num_instances; instances++) {
+		if (inst == GET_INST(SDMA0, instances))
+			break;
+	}
 	if (instances > adev->sdma.num_instances - 1) {
 		DRM_ERROR("IH: wrong ring_ID detected, as wrong sdma instance\n");
 		return -EINVAL;
-- 
2.52.0


  parent reply	other threads:[~2025-12-10  7:14 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-12-10  7:13 [PATCH] drm/amdgpu: Flush TLB on all XCCs on GFX 12.1 Alex Deucher
2025-12-10  7:13 ` [PATCH] drm/amdgpu: Add soc v1_0 ih client id table Alex Deucher
2025-12-10  7:13 ` [PATCH] drm/amdkfd: Update CWSR area calculations for GFX 12.1 Alex Deucher
2025-12-10  7:13 ` [PATCH] drm/amdgpu: Fix CU info " Alex Deucher
2025-12-10  7:13 ` [PATCH] drm/amdgpu: init RS64_MEC_P2/P3_STACK for gfx12.1 Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: Enable 5-level page table for GFX 12.1.0 Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdkfd: Update LDS, Scratch base for 57bit address Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: Add pde3 table invalidation request for GFX 12.1.0 Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: Support 57bit fault address " Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: Fix CP_MEC_MDBASE in multi-xcc for gfx v12_1 Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: Correct xcc_id input to GET_INST from physical to logic Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: use physical xcc id to get rrmt Alex Deucher
2025-12-10  7:14 ` Alex Deucher [this message]
2025-12-10  7:14 ` [PATCH] drm/amdgpu: support xcc harvest for ih translate Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: normalize reg addr as local xcc for gfx v12_1 Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu/mes_v12_1: fix mes access xcd register Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: add gfx sysfs support for gfx_v12_1 Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: correct rlc autoload for xcc harvest Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdkfd: Override KFD SVM mappings for GFX 12.1 Alex Deucher
2025-12-10  7:14 ` [PATCH] drm/amdgpu: Add gfx v12_1 interrupt source header Alex Deucher

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251210071415.19983-13-alexander.deucher@amd.com \
    --to=alexander.deucher@amd.com \
    --cc=Hawking.Zhang@amd.com \
    --cc=Likun.Gao@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox