Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA
@ 2023-10-18  5:33 Jesse Zhang
  2023-10-19  2:39 ` vitaly prosyak
  0 siblings, 1 reply; 8+ messages in thread
From: Jesse Zhang @ 2023-10-18  5:33 UTC (permalink / raw)
  To: igt-dev; +Cc: Tim Huang, Luben Tuikov, Alex Deucher, Christian Koenig

Issue corrupted header or slow sdma linear copy
to trigger SDMA hang test.

V2:
  - avoid generating warning,
    and optimize logical code (Vitaly)

Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
Cc: Luben Tuikov <luben.tuikov@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Kamil Konieczny <kamil.konieczny@linux.intel.com>

Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
Signed-off-by: Tim Huang <tim.huang@amd.com>
---
 lib/amdgpu/amd_deadlock_helpers.c | 148 ++++++++++++++++++++++++++++++
 lib/amdgpu/amd_deadlock_helpers.h |   7 ++
 tests/amdgpu/amd_deadlock.c       |  16 ++++
 3 files changed, 171 insertions(+)

diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
index a6be5f02a..8f2d63772 100644
--- a/lib/amdgpu/amd_deadlock_helpers.c
+++ b/lib/amdgpu/amd_deadlock_helpers.c
@@ -248,3 +248,151 @@ bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned i
 	free_cmd_base(base_cmd);
 	amdgpu_cs_ctx_free(context_handle);
 }
+
+void
+amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type)
+{
+	const int sdma_write_length = 1024;
+	amdgpu_context_handle context_handle;
+	amdgpu_bo_handle ib_result_handle;
+	amdgpu_bo_handle bo1, bo2;
+	amdgpu_bo_handle resources[3];
+	amdgpu_bo_list_handle bo_list;
+	void *ib_result_cpu;
+	struct amdgpu_cs_ib_info ib_info;
+	struct amdgpu_cs_request ibs_request;
+	struct amdgpu_cs_fence fence_status;
+	uint64_t bo1_mc, bo2_mc;
+	uint64_t ib_result_mc_address;
+	volatile unsigned char *bo1_cpu, *bo2_cpu;
+	amdgpu_va_handle bo1_va_handle, bo2_va_handle;
+	amdgpu_va_handle va_handle;
+	struct drm_amdgpu_info_hw_ip hw_ip_info;
+	int j, r;
+	uint32_t expired, ib_size;
+	struct amdgpu_cmd_base *base_cmd = get_cmd_base();
+
+	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &hw_ip_info);
+	igt_assert_eq(r, 0);
+
+	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
+	igt_assert_eq(r, 0);
+
+	if (hang_type == DMA_CORRUPTED_HEADER_HANG)
+		ib_size = 4096;
+	else
+		ib_size = 4096 * 0x20000;
+
+	r = amdgpu_bo_alloc_and_map(device_handle, ib_size, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT, 0,
+				    &ib_result_handle, &ib_result_cpu,
+				    &ib_result_mc_address, &va_handle);
+	igt_assert_eq(r, 0);
+
+	r = amdgpu_bo_alloc_and_map(device_handle,
+				    sdma_write_length, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT,
+				    0, &bo1,
+				    (void**)&bo1_cpu, &bo1_mc,
+				    &bo1_va_handle);
+	igt_assert_eq(r, 0);
+
+	/* set bo1 */
+	memset((void*)bo1_cpu, 0xaa, sdma_write_length);
+
+	/* allocate UC bo2 for sDMA use */
+	r = amdgpu_bo_alloc_and_map(device_handle,
+				    sdma_write_length, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT,
+				    0, &bo2,
+				    (void**)&bo2_cpu, &bo2_mc,
+				    &bo2_va_handle);
+	igt_assert_eq(r, 0);
+
+	/* clear bo2 */
+	memset((void*)bo2_cpu, 0, sdma_write_length);
+
+	resources[0] = bo1;
+	resources[1] = bo2;
+	resources[2] = ib_result_handle;
+	r = amdgpu_bo_list_create(device_handle, 3,
+				  resources, NULL, &bo_list);
+
+	/* fulfill PM4: with bad copy linear header */
+	base_cmd->attach_buf(base_cmd, ib_result_cpu, ib_size);
+	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
+		base_cmd->emit(base_cmd, 0x23decd3d);
+		base_cmd->emit(base_cmd, (sdma_write_length - 1));
+		base_cmd->emit(base_cmd, 0);
+		base_cmd->emit(base_cmd, (0xffffffff & bo1_mc));
+		base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo1_mc) >> 32));
+		base_cmd->emit(base_cmd, (0xffffffff & bo2_mc));
+		base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo2_mc) >> 32));
+	} else {
+		for (j = 1; j < 0x20000; j++) {
+			base_cmd->emit(base_cmd, SDMA_PACKET(SDMA_OPCODE_COPY,
+						SDMA_COPY_SUB_OPCODE_LINEAR,
+						0));
+			base_cmd->emit(base_cmd, (sdma_write_length - 1));
+			base_cmd->emit(base_cmd, 0);
+			base_cmd->emit(base_cmd, (0xffffffff & bo1_mc));
+			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo1_mc) >> 32));
+			base_cmd->emit(base_cmd, (0xffffffff & bo2_mc));
+			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo2_mc) >> 32));
+			base_cmd->emit(base_cmd, SDMA_PACKET(SDMA_OPCODE_COPY,
+						SDMA_COPY_SUB_OPCODE_LINEAR,
+						0));
+			base_cmd->emit(base_cmd, (sdma_write_length - 1));
+			base_cmd->emit(base_cmd, 0);
+			base_cmd->emit(base_cmd, (0xffffffff & bo2_mc));
+			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo2_mc) >> 32));
+			base_cmd->emit(base_cmd, (0xffffffff & bo1_mc));
+			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo1_mc) >> 32));
+		}
+	}
+
+	/* exec command */
+	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
+	ib_info.ib_mc_address = ib_result_mc_address;
+	ib_info.size = base_cmd->cdw;
+
+	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
+	ibs_request.ip_type = AMDGPU_HW_IP_DMA;
+	ibs_request.ring = 0;
+	ibs_request.number_of_ibs = 1;
+	ibs_request.ibs = &ib_info;
+	ibs_request.resources = bo_list;
+	ibs_request.fence_info.handle = NULL;
+
+	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
+	igt_assert_eq(r, 0);
+
+	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
+	fence_status.context = context_handle;
+	fence_status.ip_type = AMDGPU_HW_IP_DMA;
+	fence_status.ip_instance = 0;
+	fence_status.ring = 0;
+	fence_status.fence = ibs_request.seq_no;
+
+	r = amdgpu_cs_query_fence_status(&fence_status,
+					 AMDGPU_TIMEOUT_INFINITE,
+					 0, &expired);
+	if (r != 0 && r != -ECANCELED && r != -ETIME)
+		igt_assert(0);
+
+	r = amdgpu_bo_list_destroy(bo_list);
+	igt_assert_eq(r, 0);
+
+	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+				     ib_result_mc_address, 4096);
+
+	amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
+				     sdma_write_length);
+
+	amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
+				     sdma_write_length);
+	/* end of test */
+	r = amdgpu_cs_ctx_free(context_handle);
+	igt_assert_eq(r, 0);
+	free_cmd_base(base_cmd);
+}
diff --git a/lib/amdgpu/amd_deadlock_helpers.h b/lib/amdgpu/amd_deadlock_helpers.h
index cc8eba7f7..9c0d245a9 100644
--- a/lib/amdgpu/amd_deadlock_helpers.h
+++ b/lib/amdgpu/amd_deadlock_helpers.h
@@ -24,11 +24,18 @@
 #ifndef __AMD_DEADLOCK_HELPERS_H__
 #define __AMD_DEADLOCK_HELPERS_H__
 
+enum  hang_type {
+	DMA_CORRUPTED_HEADER_HANG,
+	DMA_SLOW_LINEARCOPY_HANG
+};
+
 void
 amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned ip_type);
 
 void
 bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned ip_type);
 
+void
+amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type);
 #endif
 
diff --git a/tests/amdgpu/amd_deadlock.c b/tests/amdgpu/amd_deadlock.c
index 6147b7636..dc7ec4366 100644
--- a/tests/amdgpu/amd_deadlock.c
+++ b/tests/amdgpu/amd_deadlock.c
@@ -77,6 +77,22 @@ igt_main
 		}
 	}
 
+	igt_describe("Test-GPU-reset-by-sdma-corrupted-header-with-jobs");
+	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-corrupted-header-test") {
+		if (arr_cap[AMD_IP_DMA]) {
+			igt_dynamic_f("amdgpu-deadlock-sdma-corrupted-header-test")
+			amdgpu_hang_sdma_helper(device, DMA_CORRUPTED_HEADER_HANG);
+		}
+	}
+
+	igt_describe("Test-GPU-reset-by-sdma-slow-linear-copy-with-jobs");
+	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-slow-linear-copy") {
+		if (arr_cap[AMD_IP_DMA]) {
+			igt_dynamic_f("amdgpu-deadlock-sdma-slow-linear-copy")
+			amdgpu_hang_sdma_helper(device, DMA_SLOW_LINEARCOPY_HANG);
+		}
+	}
+
 	igt_fixture {
 		amdgpu_device_deinitialize(device);
 		drm_close_driver(fd);
-- 
2.25.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA
  2023-10-18  5:33 Jesse Zhang
@ 2023-10-19  2:39 ` vitaly prosyak
  0 siblings, 0 replies; 8+ messages in thread
From: vitaly prosyak @ 2023-10-19  2:39 UTC (permalink / raw)
  To: Jesse Zhang, igt-dev
  Cc: Tim Huang, Luben Tuikov, Alex Deucher, Christian Koenig

[-- Attachment #1: Type: text/plain, Size: 11462 bytes --]

Hi Jesse,

It looks much better now, thanks!

I tested locally your patch.

There are some formatting issues, for example:

vprosyak@desktop-host:~/src/igt-gpu-tools$ ../linux/scripts/checkpatch.pl -f --no-tree /home/vprosyak/src/igt-gpu-tools/lib/amdgpu/amd_deadlock_helpers.c
WARNING: Use of volatile is usually wrong: see Documentation/process/volatile-considered-harmful.rst
#267: FILE: /home/vprosyak/src/igt-gpu-tools/lib/amdgpu/amd_deadlock_helpers.c:267:
+    volatile unsigned char *bo1_cpu, *bo2_cpu;

ERROR: "(foo**)" should be "(foo **)"
#296: FILE: /home/vprosyak/src/igt-gpu-tools/lib/amdgpu/amd_deadlock_helpers.c:296:
+                    (void**)&bo1_cpu, &bo1_mc,

ERROR: "(foo*)" should be "(foo *)"
#301: FILE: /home/vprosyak/src/igt-gpu-tools/lib/amdgpu/amd_deadlock_helpers.c:301:
+    memset((void*)bo1_cpu, 0xaa, sdma_write_length);

ERROR: "(foo**)" should be "(foo **)"
#308: FILE: /home/vprosyak/src/igt-gpu-tools/lib/amdgpu/amd_deadlock_helpers.c:308:
+                    (void**)&bo2_cpu, &bo2_mc,

ERROR: "(foo*)" should be "(foo *)"
#313: FILE: /home/vprosyak/src/igt-gpu-tools/lib/amdgpu/amd_deadlock_helpers.c:313:
+    memset((void*)bo2_cpu, 0, sdma_write_length);

total: 4 errors, 1 warnings, 398 lines checked

NOTE: For some of the reported defects, checkpatch may be able to
      mechanically convert to the typical style using --fix or --fix-inplace.

/home/vprosyak/src/igt-gpu-tools/lib/amdgpu/amd_deadlock_helpers.c has style problems, please review.


I still have some comments to be addressed see below

Thanks, Vitaly

On 2023-10-18 01:33, Jesse Zhang wrote:
> Issue corrupted header or slow sdma linear copy
> to trigger SDMA hang test.
>
> V2:
>   - avoid generating warning,
>     and optimize logical code (Vitaly)
>
> Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
> Cc: Luben Tuikov <luben.tuikov@amd.com>
> Cc: Alex Deucher <alexander.deucher@amd.com>
> Cc: Christian Koenig <christian.koenig@amd.com>
> Cc: Kamil Konieczny <kamil.konieczny@linux.intel.com>
>
> Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
> Signed-off-by: Tim Huang <tim.huang@amd.com>
> ---
>  lib/amdgpu/amd_deadlock_helpers.c | 148 ++++++++++++++++++++++++++++++
>  lib/amdgpu/amd_deadlock_helpers.h |   7 ++
>  tests/amdgpu/amd_deadlock.c       |  16 ++++
>  3 files changed, 171 insertions(+)
>
> diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
> index a6be5f02a..8f2d63772 100644
> --- a/lib/amdgpu/amd_deadlock_helpers.c
> +++ b/lib/amdgpu/amd_deadlock_helpers.c
> @@ -248,3 +248,151 @@ bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned i
>  	free_cmd_base(base_cmd);
>  	amdgpu_cs_ctx_free(context_handle);
>  }
> +
> +void
> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type)
> +{
> +	const int sdma_write_length = 1024;
> +	amdgpu_context_handle context_handle;
> +	amdgpu_bo_handle ib_result_handle;
> +	amdgpu_bo_handle bo1, bo2;
> +	amdgpu_bo_handle resources[3];
> +	amdgpu_bo_list_handle bo_list;
> +	void *ib_result_cpu;
> +	struct amdgpu_cs_ib_info ib_info;
> +	struct amdgpu_cs_request ibs_request;
> +	struct amdgpu_cs_fence fence_status;
> +	uint64_t bo1_mc, bo2_mc;
> +	uint64_t ib_result_mc_address;
> +	volatile unsigned char *bo1_cpu, *bo2_cpu;
> +	amdgpu_va_handle bo1_va_handle, bo2_va_handle;
> +	amdgpu_va_handle va_handle;
> +	struct drm_amdgpu_info_hw_ip hw_ip_info;
> +	int j, r;
> +	uint32_t expired, ib_size;
> +	struct amdgpu_cmd_base *base_cmd = get_cmd_base();
These 2 lines below are not required , 'hw_ip_info' is not used.
> +	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &hw_ip_info);
> +	igt_assert_eq(r, 0);
> +
> +	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
> +	igt_assert_eq(r, 0);
> +
> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG)
> +		ib_size = 4096;
> +	else
> +		ib_size = 4096 * 0x20000;
> +
> +	r = amdgpu_bo_alloc_and_map(device_handle, ib_size, 4096,
> +				    AMDGPU_GEM_DOMAIN_GTT, 0,
> +				    &ib_result_handle, &ib_result_cpu,
> +				    &ib_result_mc_address, &va_handle);
> +	igt_assert_eq(r, 0);
> +
> +	r = amdgpu_bo_alloc_and_map(device_handle,
> +				    sdma_write_length, 4096,
> +				    AMDGPU_GEM_DOMAIN_GTT,
> +				    0, &bo1,
> +				    (void**)&bo1_cpu, &bo1_mc,
> +				    &bo1_va_handle);
> +	igt_assert_eq(r, 0);
> +
> +	/* set bo1 */
> +	memset((void*)bo1_cpu, 0xaa, sdma_write_length);
> +
> +	/* allocate UC bo2 for sDMA use */
> +	r = amdgpu_bo_alloc_and_map(device_handle,
> +				    sdma_write_length, 4096,
> +				    AMDGPU_GEM_DOMAIN_GTT,
> +				    0, &bo2,
> +				    (void**)&bo2_cpu, &bo2_mc,
> +				    &bo2_va_handle);
> +	igt_assert_eq(r, 0);
> +
> +	/* clear bo2 */
> +	memset((void*)bo2_cpu, 0, sdma_write_length);
> +
> +	resources[0] = bo1;
> +	resources[1] = bo2;
> +	resources[2] = ib_result_handle;
> +	r = amdgpu_bo_list_create(device_handle, 3,
> +				  resources, NULL, &bo_list);
> +
> +	/* fulfill PM4: with bad copy linear header */
> +	base_cmd->attach_buf(base_cmd, ib_result_cpu, ib_size);

Can you use the following ASIC independent function :

sdma_ring_copy_linear(const struct amdgpu_ip_funcs *func,

              const struct amdgpu_ring_context *context,

              uint32_t *pm4_dw)

The existent example is into 'amdgpu_command_submission_copy_linear_helper'.


Then you need corrupt/overwrite header with value  '0x23decd3d' based on your proposal and the following function could be used:

static void
cmd_emit_at_offset(struct amdgpu_cmd_base  *base, uint32_t value, uint32_t offset_dwords)

with appropriate comment.


> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
> +		base_cmd->emit(base_cmd, 0x23decd3d);
> +		base_cmd->emit(base_cmd, (sdma_write_length - 1));
> +		base_cmd->emit(base_cmd, 0);
> +		base_cmd->emit(base_cmd, (0xffffffff & bo1_mc));
> +		base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo1_mc) >> 32));
> +		base_cmd->emit(base_cmd, (0xffffffff & bo2_mc));
> +		base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo2_mc) >> 32));
> +	} else {
We can use also here sdma_ring_copy_linear in the loop
> +		for (j = 1; j < 0x20000; j++) {
> +			base_cmd->emit(base_cmd, SDMA_PACKET(SDMA_OPCODE_COPY,
> +						SDMA_COPY_SUB_OPCODE_LINEAR,
> +						0));
> +			base_cmd->emit(base_cmd, (sdma_write_length - 1));
> +			base_cmd->emit(base_cmd, 0);
> +			base_cmd->emit(base_cmd, (0xffffffff & bo1_mc));
> +			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo1_mc) >> 32));
> +			base_cmd->emit(base_cmd, (0xffffffff & bo2_mc));
> +			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo2_mc) >> 32));
> +			base_cmd->emit(base_cmd, SDMA_PACKET(SDMA_OPCODE_COPY,
> +						SDMA_COPY_SUB_OPCODE_LINEAR,
> +						0));
> +			base_cmd->emit(base_cmd, (sdma_write_length - 1));
> +			base_cmd->emit(base_cmd, 0);
> +			base_cmd->emit(base_cmd, (0xffffffff & bo2_mc));
> +			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo2_mc) >> 32));
> +			base_cmd->emit(base_cmd, (0xffffffff & bo1_mc));
> +			base_cmd->emit(base_cmd, ((0xffffffff00000000 & bo1_mc) >> 32));
> +		}
> +	}

The whole logic would become much cleaner, like into 

void amdgpu_command_submission_copy_linear_helper(amdgpu_device_handle device,

                          const struct amdgpu_ip_block_version *ip_block)

We can use 'struct amdgpu_ring_context *ring_context;' and avoid multiple variables on the stack.


Using the approach mentioned above the code would become much cleaner even if the similar routine 'bad_access_helper' uses

'base_cmd->emit' has several custom operations that are not generic, and we do not currently have it as part of 'struct amdgpu_ip_func'

Also there is another change required (add return value)

from

void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type,
                struct amdgpu_ring_context *ring_context)

to

*int* amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type,
                struct amdgpu_ring_context *ring_context)

and the return value could be checked as follows in the existing logic.

 (r != 0 && r != -ECANCELED && r != -ETIME)


If the above approach cannot be implemented,  let me know then we can back to your existing logic.

> +
> +	/* exec command */
> +	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
> +	ib_info.ib_mc_address = ib_result_mc_address;
> +	ib_info.size = base_cmd->cdw;
> +
> +	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
> +	ibs_request.ip_type = AMDGPU_HW_IP_DMA;
> +	ibs_request.ring = 0;
> +	ibs_request.number_of_ibs = 1;
> +	ibs_request.ibs = &ib_info;
> +	ibs_request.resources = bo_list;
> +	ibs_request.fence_info.handle = NULL;
> +
> +	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
> +	igt_assert_eq(r, 0);
> +
> +	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
> +	fence_status.context = context_handle;
> +	fence_status.ip_type = AMDGPU_HW_IP_DMA;
> +	fence_status.ip_instance = 0;
> +	fence_status.ring = 0;
> +	fence_status.fence = ibs_request.seq_no;
> +
> +	r = amdgpu_cs_query_fence_status(&fence_status,
> +					 AMDGPU_TIMEOUT_INFINITE,
> +					 0, &expired);
> +	if (r != 0 && r != -ECANCELED && r != -ETIME)
> +		igt_assert(0);
> +
> +	r = amdgpu_bo_list_destroy(bo_list);
> +	igt_assert_eq(r, 0);
> +
> +	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
> +				     ib_result_mc_address, 4096);
> +
> +	amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
> +				     sdma_write_length);
> +
> +	amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
> +				     sdma_write_length);
> +	/* end of test */
> +	r = amdgpu_cs_ctx_free(context_handle);
> +	igt_assert_eq(r, 0);
> +	free_cmd_base(base_cmd);
> +}
> diff --git a/lib/amdgpu/amd_deadlock_helpers.h b/lib/amdgpu/amd_deadlock_helpers.h
> index cc8eba7f7..9c0d245a9 100644
> --- a/lib/amdgpu/amd_deadlock_helpers.h
> +++ b/lib/amdgpu/amd_deadlock_helpers.h
> @@ -24,11 +24,18 @@
>  #ifndef __AMD_DEADLOCK_HELPERS_H__
>  #define __AMD_DEADLOCK_HELPERS_H__
>  
> +enum  hang_type {
> +	DMA_CORRUPTED_HEADER_HANG,
> +	DMA_SLOW_LINEARCOPY_HANG
> +};
> +
>  void
>  amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned ip_type);
>  
>  void
>  bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned ip_type);
>  
> +void
> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type);
>  #endif
>  
> diff --git a/tests/amdgpu/amd_deadlock.c b/tests/amdgpu/amd_deadlock.c
> index 6147b7636..dc7ec4366 100644
> --- a/tests/amdgpu/amd_deadlock.c
> +++ b/tests/amdgpu/amd_deadlock.c
> @@ -77,6 +77,22 @@ igt_main
>  		}
>  	}
>  
> +	igt_describe("Test-GPU-reset-by-sdma-corrupted-header-with-jobs");
> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-corrupted-header-test") {
> +		if (arr_cap[AMD_IP_DMA]) {
> +			igt_dynamic_f("amdgpu-deadlock-sdma-corrupted-header-test")
> +			amdgpu_hang_sdma_helper(device, DMA_CORRUPTED_HEADER_HANG);
> +		}
> +	}
> +
> +	igt_describe("Test-GPU-reset-by-sdma-slow-linear-copy-with-jobs");
> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-slow-linear-copy") {
> +		if (arr_cap[AMD_IP_DMA]) {
> +			igt_dynamic_f("amdgpu-deadlock-sdma-slow-linear-copy")
> +			amdgpu_hang_sdma_helper(device, DMA_SLOW_LINEARCOPY_HANG);
> +		}
> +	}
> +
>  	igt_fixture {
>  		amdgpu_device_deinitialize(device);
>  		drm_close_driver(fd);

[-- Attachment #2: Type: text/html, Size: 14487 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA
@ 2023-10-20  5:21 Jesse Zhang
  2023-10-20 11:37 ` Kamil Konieczny
  2023-10-22  3:42 ` vitaly prosyak
  0 siblings, 2 replies; 8+ messages in thread
From: Jesse Zhang @ 2023-10-20  5:21 UTC (permalink / raw)
  To: igt-dev; +Cc: Tim Huang, Luben Tuikov, Alex Deucher, Christian Koenig

Issue corrupted header or slow sdma linear copy
to trigger SDMA hang test.

V3:
  - avoid generating warning,
    and optimize logical code. (Vitaly)
  - Use existing interfaces
    to clean up code. (Vitaly)

Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
Cc: Luben Tuikov <luben.tuikov@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Kamil Konieczny <kamil.konieczny@linux.intel.com>

Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
Signed-off-by: Tim Huang <tim.huang@amd.com>
---
 lib/amdgpu/amd_command_submission.c | 46 +++++++++------
 lib/amdgpu/amd_command_submission.h |  3 +-
 lib/amdgpu/amd_deadlock_helpers.c   | 90 +++++++++++++++++++++++++++++
 lib/amdgpu/amd_deadlock_helpers.h   |  7 +++
 tests/amdgpu/amd_basic.c            |  4 +-
 tests/amdgpu/amd_deadlock.c         | 16 +++++
 tests/amdgpu/amd_security.c         |  4 +-
 7 files changed, 148 insertions(+), 22 deletions(-)

diff --git a/lib/amdgpu/amd_command_submission.c b/lib/amdgpu/amd_command_submission.c
index 02cf9357b..b674ba640 100644
--- a/lib/amdgpu/amd_command_submission.c
+++ b/lib/amdgpu/amd_command_submission.c
@@ -18,7 +18,7 @@
  */
 
 void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type,
-				struct amdgpu_ring_context *ring_context)
+				struct amdgpu_ring_context *ring_context, int expect)
 {
 	int r;
 	uint32_t expired;
@@ -31,15 +31,23 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
 
 	amdgpu_bo_handle *all_res = alloca(sizeof(ring_context->resources[0]) * (ring_context->res_cnt + 1));
 
+	if (expect) {
+		/* allocate IB */
+		r = amdgpu_bo_alloc_and_map(device, ring_context->write_length, 4096,
+					    AMDGPU_GEM_DOMAIN_GTT, 0,
+					    &ib_result_handle, &ib_result_cpu,
+					    &ib_result_mc_address, &va_handle);
+	} else {
+		/* prepare CS */
+		igt_assert(ring_context->pm4_dw <= 1024);
+		/* allocate IB */
+		r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
+					    AMDGPU_GEM_DOMAIN_GTT, 0,
+					    &ib_result_handle, &ib_result_cpu,
+					    &ib_result_mc_address, &va_handle);
 
-	/* prepare CS */
-	igt_assert(ring_context->pm4_dw <= 1024);
 
-	/* allocate IB */
-	r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
-				    AMDGPU_GEM_DOMAIN_GTT, 0,
-				    &ib_result_handle, &ib_result_cpu,
-				    &ib_result_mc_address, &va_handle);
+	}
 	igt_assert_eq(r, 0);
 
 	/* copy PM4 packet to ring from caller */
@@ -81,9 +89,13 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
 	r = amdgpu_cs_query_fence_status(&fence_status,
 					 AMDGPU_TIMEOUT_INFINITE,
 					 0, &expired);
-	igt_assert_eq(r, 0);
-	igt_assert_eq(expired, true);
-
+	if (expect) {
+		igt_assert_neq(r, 0);
+		igt_assert_neq(expired, true);
+	} else {
+		igt_assert_eq(r, 0);
+		igt_assert_eq(expired, true);
+	}
 	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
 				 ib_result_mc_address, 4096);
 }
@@ -145,7 +157,7 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
 
 			ring_context->ring_id = ring_id;
 
-			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 			/* verify if SDMA test result meets with expected */
 			i = 0;
@@ -155,20 +167,20 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
 			} else if (ip_block->type == AMDGPU_HW_IP_GFX) {
 				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 			} else if (ip_block->type == AMDGPU_HW_IP_DMA) {
 				/* restore the bo_cpu to compare */
 				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
 				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 				/* restore again, here dest_data should be */
 				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
 				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 				/* here bo_cpu[0] should be unchanged, still is 0x12345678, otherwise failed*/
 				igt_assert_eq(ring_context->bo_cpu[0], ring_context->bo_cpu_origin);
 			}
@@ -236,7 +248,7 @@ void amdgpu_command_submission_const_fill_helper(amdgpu_device_handle device,
 			/* fulfill PM4: test DMA const fill */
 			ip_block->funcs->const_fill(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 			/* verify if SDMA test result meets with expected */
 			r = ip_block->funcs->compare(ip_block->funcs, ring_context, 4);
@@ -322,7 +334,7 @@ void amdgpu_command_submission_copy_linear_helper(amdgpu_device_handle device,
 
 				ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 				/* verify if SDMA test result meets with expected */
 				r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, 4);
diff --git a/lib/amdgpu/amd_command_submission.h b/lib/amdgpu/amd_command_submission.h
index 58f3221a3..44f0cc958 100644
--- a/lib/amdgpu/amd_command_submission.h
+++ b/lib/amdgpu/amd_command_submission.h
@@ -29,7 +29,8 @@
 #include "amd_ip_blocks.h"
 
 void amdgpu_test_exec_cs_helper(amdgpu_device_handle device,
-				unsigned int ip_type, struct amdgpu_ring_context *ring_context);
+				unsigned int ip_type, struct amdgpu_ring_context *ring_context,
+				int expect);
 
 void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
 						   const struct amdgpu_ip_block_version *ip_block,
diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
index a6be5f02a..5b7d51d94 100644
--- a/lib/amdgpu/amd_deadlock_helpers.c
+++ b/lib/amdgpu/amd_deadlock_helpers.c
@@ -13,6 +13,7 @@
 #include "amd_memory.h"
 #include "amd_deadlock_helpers.h"
 #include "amd_ip_blocks.h"
+#include "lib/amdgpu/amd_command_submission.h"
 
 #define MAX_JOB_COUNT 200
 
@@ -248,3 +249,92 @@ bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned i
 	free_cmd_base(base_cmd);
 	amdgpu_cs_ctx_free(context_handle);
 }
+
+void
+amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type)
+{
+	int j, r;
+	uint32_t *ptr, offset;
+	struct amdgpu_ring_context *ring_context;
+	struct amdgpu_cmd_base *base_cmd = get_cmd_base();
+	const struct amdgpu_ip_block_version *ip_block = get_ip_block(device_handle, AMDGPU_HW_IP_DMA);
+
+	ring_context = calloc(1, sizeof(*ring_context));
+	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
+		ring_context->write_length = 4096;
+		ring_context->pm4 = calloc(256, sizeof(*ring_context->pm4));
+		ring_context->pm4_size = 256;
+	} else {
+		ring_context->write_length = 1024 * 0x20000;
+		ring_context->pm4 = calloc(256 * 0x20000, sizeof(*ring_context->pm4));
+		ring_context->pm4_size = 256 * 0x20000;
+	}
+	ring_context->secure = false;
+	ring_context->res_cnt = 2;
+	igt_assert(ring_context->pm4);
+
+	r = amdgpu_cs_ctx_create(device_handle, &ring_context->context_handle);
+	igt_assert_eq(r, 0);
+
+	r = amdgpu_bo_alloc_and_map(device_handle, ring_context->write_length, 4096,
+					AMDGPU_GEM_DOMAIN_GTT, 0,
+					&ring_context->bo, (void **)&ring_context->bo_cpu,
+					&ring_context->bo_mc, &ring_context->va_handle);
+	igt_assert_eq(r, 0);
+
+	/* set bo */
+	memset((void *)ring_context->bo_cpu, 0, ring_context->write_length);
+	r = amdgpu_bo_alloc_and_map(device_handle,
+				    ring_context->write_length, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT,
+				    0, &ring_context->bo2,
+				    (void **)&ring_context->bo2_cpu, &ring_context->bo_mc2,
+				    &ring_context->va_handle2);
+	igt_assert_eq(r, 0);
+
+	/* set bo2 */
+	memset((void *)ring_context->bo2_cpu, 0, ring_context->write_length);
+	ring_context->resources[0] = ring_context->bo;
+	ring_context->resources[1] = ring_context->bo2;
+	base_cmd->attach_buf(base_cmd, ring_context->pm4, ring_context->write_length);
+
+	/* fulfill PM4: with bad copy linear header */
+	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
+		ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
+		base_cmd->emit_at_offset(base_cmd, 0x23decd3d, 0);
+	} else {
+		/* Save initialization pm4 */
+		ptr = ring_context->pm4;
+		for (j = 1; j < 0x20000; j++) {
+			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
+			ring_context->pm4 += ring_context->pm4_dw;
+			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
+
+			offset = ring_context->pm4_dw * 2 * j;
+			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc2), (offset - 4));
+			base_cmd->emit_at_offset(base_cmd,
+					((0xffffffff00000000 & ring_context->bo_mc2) >> 32), (offset - 3));
+			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc), (offset - 2));
+			base_cmd->emit_at_offset(base_cmd,
+					((0xffffffff00000000 & ring_context->bo_mc) >> 32), (offset - 1));
+			ring_context->pm4 += ring_context->pm4_dw;
+		}
+		/* restore pm4 */
+		ring_context->pm4 = ptr;
+		/* update the total pm4_dw */
+		ring_context->pm4_dw = ring_context->pm4_dw * 2 * j;
+	}
+
+	amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 1);
+	amdgpu_bo_unmap_and_free(ring_context->bo, ring_context->va_handle, ring_context->bo_mc,
+						 ring_context->write_length);
+	amdgpu_bo_unmap_and_free(ring_context->bo2, ring_context->va_handle2, ring_context->bo_mc2,
+						 ring_context->write_length);
+	/* clean resources */
+	free(ring_context->pm4);
+	/* end of test */
+	//r = amdgpu_cs_ctx_free(context_handle);
+	r = amdgpu_cs_ctx_free(ring_context->context_handle);
+	igt_assert_eq(r, 0);
+	free_cmd_base(base_cmd);
+}
diff --git a/lib/amdgpu/amd_deadlock_helpers.h b/lib/amdgpu/amd_deadlock_helpers.h
index cc8eba7f7..9c0d245a9 100644
--- a/lib/amdgpu/amd_deadlock_helpers.h
+++ b/lib/amdgpu/amd_deadlock_helpers.h
@@ -24,11 +24,18 @@
 #ifndef __AMD_DEADLOCK_HELPERS_H__
 #define __AMD_DEADLOCK_HELPERS_H__
 
+enum  hang_type {
+	DMA_CORRUPTED_HEADER_HANG,
+	DMA_SLOW_LINEARCOPY_HANG
+};
+
 void
 amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned ip_type);
 
 void
 bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned ip_type);
 
+void
+amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type);
 #endif
 
diff --git a/tests/amdgpu/amd_basic.c b/tests/amdgpu/amd_basic.c
index 88fdbd980..70e45649d 100644
--- a/tests/amdgpu/amd_basic.c
+++ b/tests/amdgpu/amd_basic.c
@@ -307,7 +307,7 @@ static void amdgpu_userptr_test(amdgpu_device_handle device)
 
 	ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 	r = ip_block->funcs->compare(ip_block->funcs, ring_context, 1);
 	igt_assert_eq(r, 0);
@@ -412,7 +412,7 @@ amdgpu_bo_eviction_test(amdgpu_device_handle device_handle)
 			ring_context->resources[2] = ring_context->boa_vram[loop2];
 			ring_context->resources[3] = ring_context->boa_gtt[loop2];
 			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
-			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context);
+			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 0);
 			/* fulfill PM4: test DMA copy linear */
 			r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, sdma_write_length);
 			igt_assert_eq(r, 0);
diff --git a/tests/amdgpu/amd_deadlock.c b/tests/amdgpu/amd_deadlock.c
index 6147b7636..dc7ec4366 100644
--- a/tests/amdgpu/amd_deadlock.c
+++ b/tests/amdgpu/amd_deadlock.c
@@ -77,6 +77,22 @@ igt_main
 		}
 	}
 
+	igt_describe("Test-GPU-reset-by-sdma-corrupted-header-with-jobs");
+	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-corrupted-header-test") {
+		if (arr_cap[AMD_IP_DMA]) {
+			igt_dynamic_f("amdgpu-deadlock-sdma-corrupted-header-test")
+			amdgpu_hang_sdma_helper(device, DMA_CORRUPTED_HEADER_HANG);
+		}
+	}
+
+	igt_describe("Test-GPU-reset-by-sdma-slow-linear-copy-with-jobs");
+	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-slow-linear-copy") {
+		if (arr_cap[AMD_IP_DMA]) {
+			igt_dynamic_f("amdgpu-deadlock-sdma-slow-linear-copy")
+			amdgpu_hang_sdma_helper(device, DMA_SLOW_LINEARCOPY_HANG);
+		}
+	}
+
 	igt_fixture {
 		amdgpu_device_deinitialize(device);
 		drm_close_driver(fd);
diff --git a/tests/amdgpu/amd_security.c b/tests/amdgpu/amd_security.c
index 46180df2e..1a7eba9eb 100644
--- a/tests/amdgpu/amd_security.c
+++ b/tests/amdgpu/amd_security.c
@@ -110,7 +110,7 @@ amdgpu_bo_lcopy(amdgpu_device_handle device,
 
 	amdgpu_sdma_lcopy(ring_context->pm4, ring_context->bo_mc2,
 			ring_context->bo_mc, size, secure);
-	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 	free(ring_context->pm4);
 }
 
@@ -155,7 +155,7 @@ amdgpu_bo_move(amdgpu_device_handle device, int fd,
 	 * it to the desired location.
 	 */
 	amdgpu_sdma_nop(ring_context->pm4, PACKET_NOP_SIZE);
-	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 	free(ring_context->pm4);
 }
 
-- 
2.25.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA
  2023-10-20  5:21 [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA Jesse Zhang
@ 2023-10-20 11:37 ` Kamil Konieczny
  2023-10-20 16:08   ` vitaly prosyak
  2023-10-22  3:42 ` vitaly prosyak
  1 sibling, 1 reply; 8+ messages in thread
From: Kamil Konieczny @ 2023-10-20 11:37 UTC (permalink / raw)
  To: igt-dev; +Cc: Tim Huang, Luben Tuikov, Alex Deucher, Christian Koenig

Hi Jesse,
On 2023-10-20 at 13:21:47 +0800, Jesse Zhang wrote:
> Issue corrupted header or slow sdma linear copy
> to trigger SDMA hang test.
> 
> V3:
>   - avoid generating warning,
>     and optimize logical code. (Vitaly)
>   - Use existing interfaces
>     to clean up code. (Vitaly)
> 
> Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
> Cc: Luben Tuikov <luben.tuikov@amd.com>
> Cc: Alex Deucher <alexander.deucher@amd.com>
> Cc: Christian Koenig <christian.koenig@amd.com>
> Cc: Kamil Konieczny <kamil.konieczny@linux.intel.com>
> 
> Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
> Signed-off-by: Tim Huang <tim.huang@amd.com>
> ---
>  lib/amdgpu/amd_command_submission.c | 46 +++++++++------
>  lib/amdgpu/amd_command_submission.h |  3 +-
>  lib/amdgpu/amd_deadlock_helpers.c   | 90 +++++++++++++++++++++++++++++
>  lib/amdgpu/amd_deadlock_helpers.h   |  7 +++
>  tests/amdgpu/amd_basic.c            |  4 +-
>  tests/amdgpu/amd_deadlock.c         | 16 +++++
>  tests/amdgpu/amd_security.c         |  4 +-
>  7 files changed, 148 insertions(+), 22 deletions(-)
> 
> diff --git a/lib/amdgpu/amd_command_submission.c b/lib/amdgpu/amd_command_submission.c
> index 02cf9357b..b674ba640 100644
> --- a/lib/amdgpu/amd_command_submission.c
> +++ b/lib/amdgpu/amd_command_submission.c
> @@ -18,7 +18,7 @@
>   */
>  
>  void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type,
> -				struct amdgpu_ring_context *ring_context)
> +				struct amdgpu_ring_context *ring_context, int expect)
>  {
>  	int r;
>  	uint32_t expired;
> @@ -31,15 +31,23 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
>  
>  	amdgpu_bo_handle *all_res = alloca(sizeof(ring_context->resources[0]) * (ring_context->res_cnt + 1));
>  
> +	if (expect) {
> +		/* allocate IB */
> +		r = amdgpu_bo_alloc_and_map(device, ring_context->write_length, 4096,
> +					    AMDGPU_GEM_DOMAIN_GTT, 0,
> +					    &ib_result_handle, &ib_result_cpu,
> +					    &ib_result_mc_address, &va_handle);
> +	} else {
> +		/* prepare CS */
> +		igt_assert(ring_context->pm4_dw <= 1024);
> +		/* allocate IB */
> +		r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
> +					    AMDGPU_GEM_DOMAIN_GTT, 0,
> +					    &ib_result_handle, &ib_result_cpu,
> +					    &ib_result_mc_address, &va_handle);
>  
> -	/* prepare CS */
> -	igt_assert(ring_context->pm4_dw <= 1024);
>  
> -	/* allocate IB */
> -	r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
> -				    AMDGPU_GEM_DOMAIN_GTT, 0,
> -				    &ib_result_handle, &ib_result_cpu,
> -				    &ib_result_mc_address, &va_handle);
> +	}
>  	igt_assert_eq(r, 0);
>  
>  	/* copy PM4 packet to ring from caller */
> @@ -81,9 +89,13 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
>  	r = amdgpu_cs_query_fence_status(&fence_status,
>  					 AMDGPU_TIMEOUT_INFINITE,
>  					 0, &expired);
> -	igt_assert_eq(r, 0);
> -	igt_assert_eq(expired, true);
> -
> +	if (expect) {
> +		igt_assert_neq(r, 0);
> +		igt_assert_neq(expired, true);
> +	} else {
> +		igt_assert_eq(r, 0);
> +		igt_assert_eq(expired, true);
> +	}
>  	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
>  				 ib_result_mc_address, 4096);
>  }
> @@ -145,7 +157,7 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>  
>  			ring_context->ring_id = ring_id;
>  
> -			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  			/* verify if SDMA test result meets with expected */
>  			i = 0;
> @@ -155,20 +167,20 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>  			} else if (ip_block->type == AMDGPU_HW_IP_GFX) {
>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  			} else if (ip_block->type == AMDGPU_HW_IP_DMA) {
>  				/* restore the bo_cpu to compare */
>  				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  				/* restore again, here dest_data should be */
>  				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  				/* here bo_cpu[0] should be unchanged, still is 0x12345678, otherwise failed*/
>  				igt_assert_eq(ring_context->bo_cpu[0], ring_context->bo_cpu_origin);
>  			}
> @@ -236,7 +248,7 @@ void amdgpu_command_submission_const_fill_helper(amdgpu_device_handle device,
>  			/* fulfill PM4: test DMA const fill */
>  			ip_block->funcs->const_fill(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  			/* verify if SDMA test result meets with expected */
>  			r = ip_block->funcs->compare(ip_block->funcs, ring_context, 4);
> @@ -322,7 +334,7 @@ void amdgpu_command_submission_copy_linear_helper(amdgpu_device_handle device,
>  
>  				ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  				/* verify if SDMA test result meets with expected */
>  				r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, 4);
> diff --git a/lib/amdgpu/amd_command_submission.h b/lib/amdgpu/amd_command_submission.h
> index 58f3221a3..44f0cc958 100644
> --- a/lib/amdgpu/amd_command_submission.h
> +++ b/lib/amdgpu/amd_command_submission.h
> @@ -29,7 +29,8 @@
>  #include "amd_ip_blocks.h"
>  
>  void amdgpu_test_exec_cs_helper(amdgpu_device_handle device,
> -				unsigned int ip_type, struct amdgpu_ring_context *ring_context);
> +				unsigned int ip_type, struct amdgpu_ring_context *ring_context,
> +				int expect);
>  
>  void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>  						   const struct amdgpu_ip_block_version *ip_block,
> diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
> index a6be5f02a..5b7d51d94 100644
> --- a/lib/amdgpu/amd_deadlock_helpers.c
> +++ b/lib/amdgpu/amd_deadlock_helpers.c
> @@ -13,6 +13,7 @@
>  #include "amd_memory.h"
>  #include "amd_deadlock_helpers.h"
>  #include "amd_ip_blocks.h"
> +#include "lib/amdgpu/amd_command_submission.h"
>  
>  #define MAX_JOB_COUNT 200
>  
> @@ -248,3 +249,92 @@ bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned i
>  	free_cmd_base(base_cmd);
>  	amdgpu_cs_ctx_free(context_handle);
>  }
> +
> +void
> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type)
> +{
> +	int j, r;
> +	uint32_t *ptr, offset;
> +	struct amdgpu_ring_context *ring_context;
> +	struct amdgpu_cmd_base *base_cmd = get_cmd_base();
> +	const struct amdgpu_ip_block_version *ip_block = get_ip_block(device_handle, AMDGPU_HW_IP_DMA);
> +
> +	ring_context = calloc(1, sizeof(*ring_context));
> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
> +		ring_context->write_length = 4096;
> +		ring_context->pm4 = calloc(256, sizeof(*ring_context->pm4));
> +		ring_context->pm4_size = 256;
> +	} else {
> +		ring_context->write_length = 1024 * 0x20000;
> +		ring_context->pm4 = calloc(256 * 0x20000, sizeof(*ring_context->pm4));
> +		ring_context->pm4_size = 256 * 0x20000;
> +	}
> +	ring_context->secure = false;
> +	ring_context->res_cnt = 2;
> +	igt_assert(ring_context->pm4);
> +
> +	r = amdgpu_cs_ctx_create(device_handle, &ring_context->context_handle);
> +	igt_assert_eq(r, 0);
> +
> +	r = amdgpu_bo_alloc_and_map(device_handle, ring_context->write_length, 4096,
> +					AMDGPU_GEM_DOMAIN_GTT, 0,
> +					&ring_context->bo, (void **)&ring_context->bo_cpu,
> +					&ring_context->bo_mc, &ring_context->va_handle);
> +	igt_assert_eq(r, 0);
> +
> +	/* set bo */
> +	memset((void *)ring_context->bo_cpu, 0, ring_context->write_length);
> +	r = amdgpu_bo_alloc_and_map(device_handle,
> +				    ring_context->write_length, 4096,
> +				    AMDGPU_GEM_DOMAIN_GTT,
> +				    0, &ring_context->bo2,
> +				    (void **)&ring_context->bo2_cpu, &ring_context->bo_mc2,
> +				    &ring_context->va_handle2);
> +	igt_assert_eq(r, 0);
> +
> +	/* set bo2 */
> +	memset((void *)ring_context->bo2_cpu, 0, ring_context->write_length);
> +	ring_context->resources[0] = ring_context->bo;
> +	ring_context->resources[1] = ring_context->bo2;
> +	base_cmd->attach_buf(base_cmd, ring_context->pm4, ring_context->write_length);
> +
> +	/* fulfill PM4: with bad copy linear header */
> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
> +		ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> +		base_cmd->emit_at_offset(base_cmd, 0x23decd3d, 0);
> +	} else {
> +		/* Save initialization pm4 */
> +		ptr = ring_context->pm4;
> +		for (j = 1; j < 0x20000; j++) {
> +			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> +			ring_context->pm4 += ring_context->pm4_dw;
> +			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> +
> +			offset = ring_context->pm4_dw * 2 * j;
> +			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc2), (offset - 4));
> +			base_cmd->emit_at_offset(base_cmd,
> +					((0xffffffff00000000 & ring_context->bo_mc2) >> 32), (offset - 3));
> +			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc), (offset - 2));
> +			base_cmd->emit_at_offset(base_cmd,
> +					((0xffffffff00000000 & ring_context->bo_mc) >> 32), (offset - 1));
> +			ring_context->pm4 += ring_context->pm4_dw;
> +		}
> +		/* restore pm4 */
> +		ring_context->pm4 = ptr;
> +		/* update the total pm4_dw */
> +		ring_context->pm4_dw = ring_context->pm4_dw * 2 * j;
> +	}
> +
> +	amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 1);
> +	amdgpu_bo_unmap_and_free(ring_context->bo, ring_context->va_handle, ring_context->bo_mc,
> +						 ring_context->write_length);
> +	amdgpu_bo_unmap_and_free(ring_context->bo2, ring_context->va_handle2, ring_context->bo_mc2,
> +						 ring_context->write_length);
> +	/* clean resources */
> +	free(ring_context->pm4);
> +	/* end of test */
> +	//r = amdgpu_cs_ctx_free(context_handle);
> +	r = amdgpu_cs_ctx_free(ring_context->context_handle);
> +	igt_assert_eq(r, 0);
> +	free_cmd_base(base_cmd);
> +}
> diff --git a/lib/amdgpu/amd_deadlock_helpers.h b/lib/amdgpu/amd_deadlock_helpers.h
> index cc8eba7f7..9c0d245a9 100644
> --- a/lib/amdgpu/amd_deadlock_helpers.h
> +++ b/lib/amdgpu/amd_deadlock_helpers.h
> @@ -24,11 +24,18 @@
>  #ifndef __AMD_DEADLOCK_HELPERS_H__
>  #define __AMD_DEADLOCK_HELPERS_H__
>  
> +enum  hang_type {
> +	DMA_CORRUPTED_HEADER_HANG,
> +	DMA_SLOW_LINEARCOPY_HANG
> +};
> +
>  void
>  amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned ip_type);
>  
>  void
>  bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned ip_type);
>  
> +void
> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type);
>  #endif
>  
> diff --git a/tests/amdgpu/amd_basic.c b/tests/amdgpu/amd_basic.c
> index 88fdbd980..70e45649d 100644
> --- a/tests/amdgpu/amd_basic.c
> +++ b/tests/amdgpu/amd_basic.c
> @@ -307,7 +307,7 @@ static void amdgpu_userptr_test(amdgpu_device_handle device)
>  
>  	ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  	r = ip_block->funcs->compare(ip_block->funcs, ring_context, 1);
>  	igt_assert_eq(r, 0);
> @@ -412,7 +412,7 @@ amdgpu_bo_eviction_test(amdgpu_device_handle device_handle)
>  			ring_context->resources[2] = ring_context->boa_vram[loop2];
>  			ring_context->resources[3] = ring_context->boa_gtt[loop2];
>  			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> -			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context);
> +			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 0);
>  			/* fulfill PM4: test DMA copy linear */
>  			r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, sdma_write_length);
>  			igt_assert_eq(r, 0);
> diff --git a/tests/amdgpu/amd_deadlock.c b/tests/amdgpu/amd_deadlock.c
> index 6147b7636..dc7ec4366 100644
> --- a/tests/amdgpu/amd_deadlock.c
> +++ b/tests/amdgpu/amd_deadlock.c
> @@ -77,6 +77,22 @@ igt_main
>  		}
>  	}
>  
> +	igt_describe("Test-GPU-reset-by-sdma-corrupted-header-with-jobs");
----------------------^---^-----^--^----^---------^------^----^
s/-/ /g
In descriptions you may use spaces.

> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-corrupted-header-test") {

And here it is ok - no spaces in tests names, use '-' in place of them.

> +		if (arr_cap[AMD_IP_DMA]) {
> +			igt_dynamic_f("amdgpu-deadlock-sdma-corrupted-header-test")
> +			amdgpu_hang_sdma_helper(device, DMA_CORRUPTED_HEADER_HANG);
> +		}
> +	}
> +
> +	igt_describe("Test-GPU-reset-by-sdma-slow-linear-copy-with-jobs");
----------------------^---^...
Same here.

Regards,
Kamil

> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-slow-linear-copy") {
> +		if (arr_cap[AMD_IP_DMA]) {
> +			igt_dynamic_f("amdgpu-deadlock-sdma-slow-linear-copy")
> +			amdgpu_hang_sdma_helper(device, DMA_SLOW_LINEARCOPY_HANG);
> +		}
> +	}
> +
>  	igt_fixture {
>  		amdgpu_device_deinitialize(device);
>  		drm_close_driver(fd);
> diff --git a/tests/amdgpu/amd_security.c b/tests/amdgpu/amd_security.c
> index 46180df2e..1a7eba9eb 100644
> --- a/tests/amdgpu/amd_security.c
> +++ b/tests/amdgpu/amd_security.c
> @@ -110,7 +110,7 @@ amdgpu_bo_lcopy(amdgpu_device_handle device,
>  
>  	amdgpu_sdma_lcopy(ring_context->pm4, ring_context->bo_mc2,
>  			ring_context->bo_mc, size, secure);
> -	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  	free(ring_context->pm4);
>  }
>  
> @@ -155,7 +155,7 @@ amdgpu_bo_move(amdgpu_device_handle device, int fd,
>  	 * it to the desired location.
>  	 */
>  	amdgpu_sdma_nop(ring_context->pm4, PACKET_NOP_SIZE);
> -	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  	free(ring_context->pm4);
>  }
>  
> -- 
> 2.25.1
> 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA
  2023-10-20 11:37 ` Kamil Konieczny
@ 2023-10-20 16:08   ` vitaly prosyak
  0 siblings, 0 replies; 8+ messages in thread
From: vitaly prosyak @ 2023-10-20 16:08 UTC (permalink / raw)
  To: Kamil Konieczny, igt-dev, Jesse Zhang, Vitaly Prosyak,
	Alex Deucher, Luben Tuikov, Christian Koenig, Tim Huang


On 2023-10-20 07:37, Kamil Konieczny wrote:
> Hi Jesse,
> On 2023-10-20 at 13:21:47 +0800, Jesse Zhang wrote:
>> Issue corrupted header or slow sdma linear copy
>> to trigger SDMA hang test.
>>
>> V3:
>>   - avoid generating warning,
>>     and optimize logical code. (Vitaly)
>>   - Use existing interfaces
>>     to clean up code. (Vitaly)
>>
>> Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
>> Cc: Luben Tuikov <luben.tuikov@amd.com>
>> Cc: Alex Deucher <alexander.deucher@amd.com>
>> Cc: Christian Koenig <christian.koenig@amd.com>
>> Cc: Kamil Konieczny <kamil.konieczny@linux.intel.com>
>>
>> Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
>> Signed-off-by: Tim Huang <tim.huang@amd.com>
>> ---
>>  lib/amdgpu/amd_command_submission.c | 46 +++++++++------
>>  lib/amdgpu/amd_command_submission.h |  3 +-
>>  lib/amdgpu/amd_deadlock_helpers.c   | 90 +++++++++++++++++++++++++++++
>>  lib/amdgpu/amd_deadlock_helpers.h   |  7 +++
>>  tests/amdgpu/amd_basic.c            |  4 +-
>>  tests/amdgpu/amd_deadlock.c         | 16 +++++
>>  tests/amdgpu/amd_security.c         |  4 +-
>>  7 files changed, 148 insertions(+), 22 deletions(-)
>>
>> diff --git a/lib/amdgpu/amd_command_submission.c b/lib/amdgpu/amd_command_submission.c
>> index 02cf9357b..b674ba640 100644
>> --- a/lib/amdgpu/amd_command_submission.c
>> +++ b/lib/amdgpu/amd_command_submission.c
>> @@ -18,7 +18,7 @@
>>   */
>>  
>>  void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type,
>> -				struct amdgpu_ring_context *ring_context)
>> +				struct amdgpu_ring_context *ring_context, int expect)
>>  {
>>  	int r;
>>  	uint32_t expired;
>> @@ -31,15 +31,23 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
>>  
>>  	amdgpu_bo_handle *all_res = alloca(sizeof(ring_context->resources[0]) * (ring_context->res_cnt + 1));
>>  
>> +	if (expect) {
>> +		/* allocate IB */
>> +		r = amdgpu_bo_alloc_and_map(device, ring_context->write_length, 4096,
>> +					    AMDGPU_GEM_DOMAIN_GTT, 0,
>> +					    &ib_result_handle, &ib_result_cpu,
>> +					    &ib_result_mc_address, &va_handle);
>> +	} else {
>> +		/* prepare CS */
>> +		igt_assert(ring_context->pm4_dw <= 1024);
>> +		/* allocate IB */
>> +		r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
>> +					    AMDGPU_GEM_DOMAIN_GTT, 0,
>> +					    &ib_result_handle, &ib_result_cpu,
>> +					    &ib_result_mc_address, &va_handle);
>>  
>> -	/* prepare CS */
>> -	igt_assert(ring_context->pm4_dw <= 1024);
>>  
>> -	/* allocate IB */
>> -	r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
>> -				    AMDGPU_GEM_DOMAIN_GTT, 0,
>> -				    &ib_result_handle, &ib_result_cpu,
>> -				    &ib_result_mc_address, &va_handle);
>> +	}
>>  	igt_assert_eq(r, 0);
>>  
>>  	/* copy PM4 packet to ring from caller */
>> @@ -81,9 +89,13 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
>>  	r = amdgpu_cs_query_fence_status(&fence_status,
>>  					 AMDGPU_TIMEOUT_INFINITE,
>>  					 0, &expired);
>> -	igt_assert_eq(r, 0);
>> -	igt_assert_eq(expired, true);
>> -
>> +	if (expect) {
>> +		igt_assert_neq(r, 0);
>> +		igt_assert_neq(expired, true);
>> +	} else {
>> +		igt_assert_eq(r, 0);
>> +		igt_assert_eq(expired, true);
>> +	}
>>  	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
>>  				 ib_result_mc_address, 4096);
>>  }
>> @@ -145,7 +157,7 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>>  
>>  			ring_context->ring_id = ring_id;
>>  
>> -			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  
>>  			/* verify if SDMA test result meets with expected */
>>  			i = 0;
>> @@ -155,20 +167,20 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>>  			} else if (ip_block->type == AMDGPU_HW_IP_GFX) {
>>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>>  
>> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  
>>  			} else if (ip_block->type == AMDGPU_HW_IP_DMA) {
>>  				/* restore the bo_cpu to compare */
>>  				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
>>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>>  
>> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  
>>  				/* restore again, here dest_data should be */
>>  				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
>>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>>  
>> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  				/* here bo_cpu[0] should be unchanged, still is 0x12345678, otherwise failed*/
>>  				igt_assert_eq(ring_context->bo_cpu[0], ring_context->bo_cpu_origin);
>>  			}
>> @@ -236,7 +248,7 @@ void amdgpu_command_submission_const_fill_helper(amdgpu_device_handle device,
>>  			/* fulfill PM4: test DMA const fill */
>>  			ip_block->funcs->const_fill(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>>  
>> -			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  
>>  			/* verify if SDMA test result meets with expected */
>>  			r = ip_block->funcs->compare(ip_block->funcs, ring_context, 4);
>> @@ -322,7 +334,7 @@ void amdgpu_command_submission_copy_linear_helper(amdgpu_device_handle device,
>>  
>>  				ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>>  
>> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  
>>  				/* verify if SDMA test result meets with expected */
>>  				r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, 4);
>> diff --git a/lib/amdgpu/amd_command_submission.h b/lib/amdgpu/amd_command_submission.h
>> index 58f3221a3..44f0cc958 100644
>> --- a/lib/amdgpu/amd_command_submission.h
>> +++ b/lib/amdgpu/amd_command_submission.h
>> @@ -29,7 +29,8 @@
>>  #include "amd_ip_blocks.h"
>>  
>>  void amdgpu_test_exec_cs_helper(amdgpu_device_handle device,
>> -				unsigned int ip_type, struct amdgpu_ring_context *ring_context);
>> +				unsigned int ip_type, struct amdgpu_ring_context *ring_context,
>> +				int expect);
>>  
>>  void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>>  						   const struct amdgpu_ip_block_version *ip_block,
>> diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
>> index a6be5f02a..5b7d51d94 100644
>> --- a/lib/amdgpu/amd_deadlock_helpers.c
>> +++ b/lib/amdgpu/amd_deadlock_helpers.c
>> @@ -13,6 +13,7 @@
>>  #include "amd_memory.h"
>>  #include "amd_deadlock_helpers.h"
>>  #include "amd_ip_blocks.h"
>> +#include "lib/amdgpu/amd_command_submission.h"
>>  
>>  #define MAX_JOB_COUNT 200
>>  
>> @@ -248,3 +249,92 @@ bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned i
>>  	free_cmd_base(base_cmd);
>>  	amdgpu_cs_ctx_free(context_handle);
>>  }
>> +
>> +void
>> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type)
>> +{
>> +	int j, r;
>> +	uint32_t *ptr, offset;
>> +	struct amdgpu_ring_context *ring_context;
>> +	struct amdgpu_cmd_base *base_cmd = get_cmd_base();
>> +	const struct amdgpu_ip_block_version *ip_block = get_ip_block(device_handle, AMDGPU_HW_IP_DMA);
>> +
>> +	ring_context = calloc(1, sizeof(*ring_context));
>> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
>> +		ring_context->write_length = 4096;
>> +		ring_context->pm4 = calloc(256, sizeof(*ring_context->pm4));
>> +		ring_context->pm4_size = 256;
>> +	} else {
>> +		ring_context->write_length = 1024 * 0x20000;
>> +		ring_context->pm4 = calloc(256 * 0x20000, sizeof(*ring_context->pm4));
>> +		ring_context->pm4_size = 256 * 0x20000;
>> +	}
>> +	ring_context->secure = false;
>> +	ring_context->res_cnt = 2;
>> +	igt_assert(ring_context->pm4);
>> +
>> +	r = amdgpu_cs_ctx_create(device_handle, &ring_context->context_handle);
>> +	igt_assert_eq(r, 0);
>> +
>> +	r = amdgpu_bo_alloc_and_map(device_handle, ring_context->write_length, 4096,
>> +					AMDGPU_GEM_DOMAIN_GTT, 0,
>> +					&ring_context->bo, (void **)&ring_context->bo_cpu,
>> +					&ring_context->bo_mc, &ring_context->va_handle);
>> +	igt_assert_eq(r, 0);
>> +
>> +	/* set bo */
>> +	memset((void *)ring_context->bo_cpu, 0, ring_context->write_length);
>> +	r = amdgpu_bo_alloc_and_map(device_handle,
>> +				    ring_context->write_length, 4096,
>> +				    AMDGPU_GEM_DOMAIN_GTT,
>> +				    0, &ring_context->bo2,
>> +				    (void **)&ring_context->bo2_cpu, &ring_context->bo_mc2,
>> +				    &ring_context->va_handle2);
>> +	igt_assert_eq(r, 0);
>> +
>> +	/* set bo2 */
>> +	memset((void *)ring_context->bo2_cpu, 0, ring_context->write_length);
>> +	ring_context->resources[0] = ring_context->bo;
>> +	ring_context->resources[1] = ring_context->bo2;
>> +	base_cmd->attach_buf(base_cmd, ring_context->pm4, ring_context->write_length);
>> +
>> +	/* fulfill PM4: with bad copy linear header */
>> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
>> +		ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>> +		base_cmd->emit_at_offset(base_cmd, 0x23decd3d, 0);
>> +	} else {
>> +		/* Save initialization pm4 */
>> +		ptr = ring_context->pm4;
>> +		for (j = 1; j < 0x20000; j++) {
>> +			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>> +			ring_context->pm4 += ring_context->pm4_dw;
>> +			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>> +
>> +			offset = ring_context->pm4_dw * 2 * j;
>> +			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc2), (offset - 4));
>> +			base_cmd->emit_at_offset(base_cmd,
>> +					((0xffffffff00000000 & ring_context->bo_mc2) >> 32), (offset - 3));
>> +			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc), (offset - 2));
>> +			base_cmd->emit_at_offset(base_cmd,
>> +					((0xffffffff00000000 & ring_context->bo_mc) >> 32), (offset - 1));
>> +			ring_context->pm4 += ring_context->pm4_dw;
>> +		}
>> +		/* restore pm4 */
>> +		ring_context->pm4 = ptr;
>> +		/* update the total pm4_dw */
>> +		ring_context->pm4_dw = ring_context->pm4_dw * 2 * j;
>> +	}
>> +
>> +	amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 1);
>> +	amdgpu_bo_unmap_and_free(ring_context->bo, ring_context->va_handle, ring_context->bo_mc,
>> +						 ring_context->write_length);
>> +	amdgpu_bo_unmap_and_free(ring_context->bo2, ring_context->va_handle2, ring_context->bo_mc2,
>> +						 ring_context->write_length);
>> +	/* clean resources */
>> +	free(ring_context->pm4);
>> +	/* end of test */
>> +	//r = amdgpu_cs_ctx_free(context_handle);
>> +	r = amdgpu_cs_ctx_free(ring_context->context_handle);
>> +	igt_assert_eq(r, 0);
>> +	free_cmd_base(base_cmd);
>> +}
>> diff --git a/lib/amdgpu/amd_deadlock_helpers.h b/lib/amdgpu/amd_deadlock_helpers.h
>> index cc8eba7f7..9c0d245a9 100644
>> --- a/lib/amdgpu/amd_deadlock_helpers.h
>> +++ b/lib/amdgpu/amd_deadlock_helpers.h
>> @@ -24,11 +24,18 @@
>>  #ifndef __AMD_DEADLOCK_HELPERS_H__
>>  #define __AMD_DEADLOCK_HELPERS_H__
>>  
>> +enum  hang_type {
>> +	DMA_CORRUPTED_HEADER_HANG,
>> +	DMA_SLOW_LINEARCOPY_HANG
>> +};
>> +
>>  void
>>  amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned ip_type);
>>  
>>  void
>>  bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned ip_type);
>>  
>> +void
>> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type);
>>  #endif
>>  
>> diff --git a/tests/amdgpu/amd_basic.c b/tests/amdgpu/amd_basic.c
>> index 88fdbd980..70e45649d 100644
>> --- a/tests/amdgpu/amd_basic.c
>> +++ b/tests/amdgpu/amd_basic.c
>> @@ -307,7 +307,7 @@ static void amdgpu_userptr_test(amdgpu_device_handle device)
>>  
>>  	ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>>  
>> -	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  
>>  	r = ip_block->funcs->compare(ip_block->funcs, ring_context, 1);
>>  	igt_assert_eq(r, 0);
>> @@ -412,7 +412,7 @@ amdgpu_bo_eviction_test(amdgpu_device_handle device_handle)
>>  			ring_context->resources[2] = ring_context->boa_vram[loop2];
>>  			ring_context->resources[3] = ring_context->boa_gtt[loop2];
>>  			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>> -			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context);
>> +			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 0);
>>  			/* fulfill PM4: test DMA copy linear */
>>  			r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, sdma_write_length);
>>  			igt_assert_eq(r, 0);
>> diff --git a/tests/amdgpu/amd_deadlock.c b/tests/amdgpu/amd_deadlock.c
>> index 6147b7636..dc7ec4366 100644
>> --- a/tests/amdgpu/amd_deadlock.c
>> +++ b/tests/amdgpu/amd_deadlock.c
>> @@ -77,6 +77,22 @@ igt_main
>>  		}
>>  	}
>>  
>> +	igt_describe("Test-GPU-reset-by-sdma-corrupted-header-with-jobs");
> ----------------------^---^-----^--^----^---------^------^----^
> s/-/ /g
> In descriptions you may use spaces.
Thanks Kamil!
>
>> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-corrupted-header-test") {
> And here it is ok - no spaces in tests names, use '-' in place of them.
It was my fault for putting '-' everywhere, basically when the test fails there is a complaint about an invalid character, now it is clear that it is related only to the test name.
>> +		if (arr_cap[AMD_IP_DMA]) {
>> +			igt_dynamic_f("amdgpu-deadlock-sdma-corrupted-header-test")
>> +			amdgpu_hang_sdma_helper(device, DMA_CORRUPTED_HEADER_HANG);
>> +		}
>> +	}
>> +
>> +	igt_describe("Test-GPU-reset-by-sdma-slow-linear-copy-with-jobs");
> ----------------------^---^...
> Same here.
>
> Regards,
> Kamil
>
>> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-slow-linear-copy") {
>> +		if (arr_cap[AMD_IP_DMA]) {
>> +			igt_dynamic_f("amdgpu-deadlock-sdma-slow-linear-copy")
>> +			amdgpu_hang_sdma_helper(device, DMA_SLOW_LINEARCOPY_HANG);
>> +		}
>> +	}
>> +
>>  	igt_fixture {
>>  		amdgpu_device_deinitialize(device);
>>  		drm_close_driver(fd);
>> diff --git a/tests/amdgpu/amd_security.c b/tests/amdgpu/amd_security.c
>> index 46180df2e..1a7eba9eb 100644
>> --- a/tests/amdgpu/amd_security.c
>> +++ b/tests/amdgpu/amd_security.c
>> @@ -110,7 +110,7 @@ amdgpu_bo_lcopy(amdgpu_device_handle device,
>>  
>>  	amdgpu_sdma_lcopy(ring_context->pm4, ring_context->bo_mc2,
>>  			ring_context->bo_mc, size, secure);
>> -	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  	free(ring_context->pm4);
>>  }
>>  
>> @@ -155,7 +155,7 @@ amdgpu_bo_move(amdgpu_device_handle device, int fd,
>>  	 * it to the desired location.
>>  	 */
>>  	amdgpu_sdma_nop(ring_context->pm4, PACKET_NOP_SIZE);
>> -	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
>> +	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>>  	free(ring_context->pm4);
>>  }
>>  
>> -- 
>> 2.25.1
>>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA
  2023-10-20  5:21 [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA Jesse Zhang
  2023-10-20 11:37 ` Kamil Konieczny
@ 2023-10-22  3:42 ` vitaly prosyak
  1 sibling, 0 replies; 8+ messages in thread
From: vitaly prosyak @ 2023-10-22  3:42 UTC (permalink / raw)
  To: Jesse Zhang, igt-dev
  Cc: Tim Huang, Luben Tuikov, Alex Deucher, Christian Koenig

[-- Attachment #1: Type: text/plain, Size: 15188 bytes --]

Hi Jesse,

Please, see below to add a couple of 'defines' and additional comments for clarification.

With this Reviewed-by: Vitaly Prosyak <vitaly.prosyak@amd.com>

I validated on SIENNA_CICHLID, works fine!

Thanks, Vitaly

On 2023-10-20 01:21, Jesse Zhang wrote:
> Issue corrupted header or slow sdma linear copy
> to trigger SDMA hang test.
>
> V3:
>   - avoid generating warning,
>     and optimize logical code. (Vitaly)
>   - Use existing interfaces
>     to clean up code. (Vitaly)
>
> Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
> Cc: Luben Tuikov <luben.tuikov@amd.com>
> Cc: Alex Deucher <alexander.deucher@amd.com>
> Cc: Christian Koenig <christian.koenig@amd.com>
> Cc: Kamil Konieczny <kamil.konieczny@linux.intel.com>
>
> Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
> Signed-off-by: Tim Huang <tim.huang@amd.com>
> ---
>  lib/amdgpu/amd_command_submission.c | 46 +++++++++------
>  lib/amdgpu/amd_command_submission.h |  3 +-
>  lib/amdgpu/amd_deadlock_helpers.c   | 90 +++++++++++++++++++++++++++++
>  lib/amdgpu/amd_deadlock_helpers.h   |  7 +++
>  tests/amdgpu/amd_basic.c            |  4 +-
>  tests/amdgpu/amd_deadlock.c         | 16 +++++
>  tests/amdgpu/amd_security.c         |  4 +-
>  7 files changed, 148 insertions(+), 22 deletions(-)
>
> diff --git a/lib/amdgpu/amd_command_submission.c b/lib/amdgpu/amd_command_submission.c
> index 02cf9357b..b674ba640 100644
> --- a/lib/amdgpu/amd_command_submission.c
> +++ b/lib/amdgpu/amd_command_submission.c
> @@ -18,7 +18,7 @@
>   */
>  
>  void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type,
> -				struct amdgpu_ring_context *ring_context)
> +				struct amdgpu_ring_context *ring_context, int expect)
>  {
>  	int r;
>  	uint32_t expired;
> @@ -31,15 +31,23 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
>  
>  	amdgpu_bo_handle *all_res = alloca(sizeof(ring_context->resources[0]) * (ring_context->res_cnt + 1));
>  
> +	if (expect) {
> +		/* allocate IB */
> +		r = amdgpu_bo_alloc_and_map(device, ring_context->write_length, 4096,
> +					    AMDGPU_GEM_DOMAIN_GTT, 0,
> +					    &ib_result_handle, &ib_result_cpu,
> +					    &ib_result_mc_address, &va_handle);
> +	} else {
> +		/* prepare CS */
> +		igt_assert(ring_context->pm4_dw <= 1024);
> +		/* allocate IB */
> +		r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
> +					    AMDGPU_GEM_DOMAIN_GTT, 0,
> +					    &ib_result_handle, &ib_result_cpu,
> +					    &ib_result_mc_address, &va_handle);
>  
> -	/* prepare CS */
> -	igt_assert(ring_context->pm4_dw <= 1024);
>  
> -	/* allocate IB */
> -	r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
> -				    AMDGPU_GEM_DOMAIN_GTT, 0,
> -				    &ib_result_handle, &ib_result_cpu,
> -				    &ib_result_mc_address, &va_handle);
> +	}
>  	igt_assert_eq(r, 0);
>  
>  	/* copy PM4 packet to ring from caller */
> @@ -81,9 +89,13 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
>  	r = amdgpu_cs_query_fence_status(&fence_status,
>  					 AMDGPU_TIMEOUT_INFINITE,
>  					 0, &expired);
> -	igt_assert_eq(r, 0);
> -	igt_assert_eq(expired, true);
> -
> +	if (expect) {
> +		igt_assert_neq(r, 0);
> +		igt_assert_neq(expired, true);
> +	} else {
> +		igt_assert_eq(r, 0);
> +		igt_assert_eq(expired, true);
> +	}
>  	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
>  				 ib_result_mc_address, 4096);
>  }
> @@ -145,7 +157,7 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>  
>  			ring_context->ring_id = ring_id;
>  
> -			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  			/* verify if SDMA test result meets with expected */
>  			i = 0;
> @@ -155,20 +167,20 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>  			} else if (ip_block->type == AMDGPU_HW_IP_GFX) {
>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  			} else if (ip_block->type == AMDGPU_HW_IP_DMA) {
>  				/* restore the bo_cpu to compare */
>  				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  				/* restore again, here dest_data should be */
>  				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
>  				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  				/* here bo_cpu[0] should be unchanged, still is 0x12345678, otherwise failed*/
>  				igt_assert_eq(ring_context->bo_cpu[0], ring_context->bo_cpu_origin);
>  			}
> @@ -236,7 +248,7 @@ void amdgpu_command_submission_const_fill_helper(amdgpu_device_handle device,
>  			/* fulfill PM4: test DMA const fill */
>  			ip_block->funcs->const_fill(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  			/* verify if SDMA test result meets with expected */
>  			r = ip_block->funcs->compare(ip_block->funcs, ring_context, 4);
> @@ -322,7 +334,7 @@ void amdgpu_command_submission_copy_linear_helper(amdgpu_device_handle device,
>  
>  				ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  				/* verify if SDMA test result meets with expected */
>  				r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, 4);
> diff --git a/lib/amdgpu/amd_command_submission.h b/lib/amdgpu/amd_command_submission.h
> index 58f3221a3..44f0cc958 100644
> --- a/lib/amdgpu/amd_command_submission.h
> +++ b/lib/amdgpu/amd_command_submission.h
> @@ -29,7 +29,8 @@
>  #include "amd_ip_blocks.h"
>  
>  void amdgpu_test_exec_cs_helper(amdgpu_device_handle device,
> -				unsigned int ip_type, struct amdgpu_ring_context *ring_context);
> +				unsigned int ip_type, struct amdgpu_ring_context *ring_context,
> +				int expect);
>  
>  void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
>  						   const struct amdgpu_ip_block_version *ip_block,
> diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
> index a6be5f02a..5b7d51d94 100644
> --- a/lib/amdgpu/amd_deadlock_helpers.c
> +++ b/lib/amdgpu/amd_deadlock_helpers.c
> @@ -13,6 +13,7 @@
>  #include "amd_memory.h"
>  #include "amd_deadlock_helpers.h"
>  #include "amd_ip_blocks.h"
> +#include "lib/amdgpu/amd_command_submission.h"
>  
>  #define MAX_JOB_COUNT 200
>  
> @@ -248,3 +249,92 @@ bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned i
>  	free_cmd_base(base_cmd);
>  	amdgpu_cs_ctx_free(context_handle);
>  }

#define MAX_DMABUF_COUNT 0x20000

#define MAX_DWORD_COUNT 256

> +
> +void
> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type)
> +{
> +	int j, r;
> +	uint32_t *ptr, offset;
> +	struct amdgpu_ring_context *ring_context;
> +	struct amdgpu_cmd_base *base_cmd = get_cmd_base();
> +	const struct amdgpu_ip_block_version *ip_block = get_ip_block(device_handle, AMDGPU_HW_IP_DMA);
> +
> +	ring_context = calloc(1, sizeof(*ring_context));
> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
> +		ring_context->write_length = 4096;
> +		ring_context->pm4 = calloc(MAX_DWORD_COUNT, sizeof(*ring_context->pm4));
> +		ring_context->pm4_size = MAX_DWORD_COUNT;
> +	} else {
> +		ring_context->write_length = MAX_DWORD_COUNT * 4 * MAX_DMABUF_COUNT;
> +		ring_context->pm4 = calloc(MAX_DWORD_COUNT * MAX_DMABUF_COUNT, sizeof(*ring_context->pm4));
> +		ring_context->pm4_size = MAX_DWORD_COUNT * MAX_DMABUF_COUNT;
> +	}
> +	ring_context->secure = false;
> +	ring_context->res_cnt = 2;
> +	igt_assert(ring_context->pm4);
> +
> +	r = amdgpu_cs_ctx_create(device_handle, &ring_context->context_handle);
> +	igt_assert_eq(r, 0);
> +
> +	r = amdgpu_bo_alloc_and_map(device_handle, ring_context->write_length, 4096,
> +					AMDGPU_GEM_DOMAIN_GTT, 0,
> +					&ring_context->bo, (void **)&ring_context->bo_cpu,
> +					&ring_context->bo_mc, &ring_context->va_handle);
> +	igt_assert_eq(r, 0);
> +
> +	/* set bo */
> +	memset((void *)ring_context->bo_cpu, 0, ring_context->write_length);
> +	r = amdgpu_bo_alloc_and_map(device_handle,
> +				    ring_context->write_length, 4096,
> +				    AMDGPU_GEM_DOMAIN_GTT,
> +				    0, &ring_context->bo2,
> +				    (void **)&ring_context->bo2_cpu, &ring_context->bo_mc2,
> +				    &ring_context->va_handle2);
> +	igt_assert_eq(r, 0);
> +
> +	/* set bo2 */
> +	memset((void *)ring_context->bo2_cpu, 0, ring_context->write_length);
> +	ring_context->resources[0] = ring_context->bo;
> +	ring_context->resources[1] = ring_context->bo2;
> +	base_cmd->attach_buf(base_cmd, ring_context->pm4, ring_context->write_length);
> +
> +	/* fulfill PM4: with bad copy linear header */
> +	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
> +		ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> +		base_cmd->emit_at_offset(base_cmd, 0x23decd3d, 0);
> +	} else {
> +		/* Save initialization pm4 */
> +		ptr = ring_context->pm4;
> +		for (j = 1; j < MAX_DMABUF_COUNT; j++) {
/* copy from buf1 to buf2 */
> +			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> +			ring_context->pm4 += ring_context->pm4_dw;
> +			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> +
> +			offset = ring_context->pm4_dw * 2 * j;
/* override  addr of buf1 and buf 2 in order to copy from buf2 to buf1 */
> +			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc2), (offset - 4));
> +			base_cmd->emit_at_offset(base_cmd,
> +					((0xffffffff00000000 & ring_context->bo_mc2) >> 32), (offset - 3));
> +			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc), (offset - 2));
> +			base_cmd->emit_at_offset(base_cmd,
> +					((0xffffffff00000000 & ring_context->bo_mc) >> 32), (offset - 1));
> +			ring_context->pm4 += ring_context->pm4_dw;
> +		}
> +		/* restore pm4 */
> +		ring_context->pm4 = ptr;
> +		/* update the total pm4_dw */
> +		ring_context->pm4_dw = ring_context->pm4_dw * 2 * j;
> +	}
> +
> +	amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 1);
> +	amdgpu_bo_unmap_and_free(ring_context->bo, ring_context->va_handle, ring_context->bo_mc,
> +						 ring_context->write_length);
> +	amdgpu_bo_unmap_and_free(ring_context->bo2, ring_context->va_handle2, ring_context->bo_mc2,
> +						 ring_context->write_length);
> +	/* clean resources */
> +	free(ring_context->pm4);
> +	/* end of test */
> +	//r = amdgpu_cs_ctx_free(context_handle);
> +	r = amdgpu_cs_ctx_free(ring_context->context_handle);
> +	igt_assert_eq(r, 0);
> +	free_cmd_base(base_cmd);
> +}
> diff --git a/lib/amdgpu/amd_deadlock_helpers.h b/lib/amdgpu/amd_deadlock_helpers.h
> index cc8eba7f7..9c0d245a9 100644
> --- a/lib/amdgpu/amd_deadlock_helpers.h
> +++ b/lib/amdgpu/amd_deadlock_helpers.h
> @@ -24,11 +24,18 @@
>  #ifndef __AMD_DEADLOCK_HELPERS_H__
>  #define __AMD_DEADLOCK_HELPERS_H__
>  
> +enum  hang_type {
> +	DMA_CORRUPTED_HEADER_HANG,
> +	DMA_SLOW_LINEARCOPY_HANG
> +};
> +
>  void
>  amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned ip_type);
>  
>  void
>  bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned ip_type);
>  
> +void
> +amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type);
>  #endif
>  
> diff --git a/tests/amdgpu/amd_basic.c b/tests/amdgpu/amd_basic.c
> index 88fdbd980..70e45649d 100644
> --- a/tests/amdgpu/amd_basic.c
> +++ b/tests/amdgpu/amd_basic.c
> @@ -307,7 +307,7 @@ static void amdgpu_userptr_test(amdgpu_device_handle device)
>  
>  	ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
>  
> -	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  
>  	r = ip_block->funcs->compare(ip_block->funcs, ring_context, 1);
>  	igt_assert_eq(r, 0);
> @@ -412,7 +412,7 @@ amdgpu_bo_eviction_test(amdgpu_device_handle device_handle)
>  			ring_context->resources[2] = ring_context->boa_vram[loop2];
>  			ring_context->resources[3] = ring_context->boa_gtt[loop2];
>  			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
> -			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context);
> +			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 0);
>  			/* fulfill PM4: test DMA copy linear */
>  			r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, sdma_write_length);
>  			igt_assert_eq(r, 0);
> diff --git a/tests/amdgpu/amd_deadlock.c b/tests/amdgpu/amd_deadlock.c
> index 6147b7636..dc7ec4366 100644
> --- a/tests/amdgpu/amd_deadlock.c
> +++ b/tests/amdgpu/amd_deadlock.c
> @@ -77,6 +77,22 @@ igt_main
>  		}
>  	}
>  
> +	igt_describe("Test-GPU-reset-by-sdma-corrupted-header-with-jobs");
> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-corrupted-header-test") {
> +		if (arr_cap[AMD_IP_DMA]) {
> +			igt_dynamic_f("amdgpu-deadlock-sdma-corrupted-header-test")
> +			amdgpu_hang_sdma_helper(device, DMA_CORRUPTED_HEADER_HANG);
> +		}
> +	}
> +
> +	igt_describe("Test-GPU-reset-by-sdma-slow-linear-copy-with-jobs");
> +	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-slow-linear-copy") {
> +		if (arr_cap[AMD_IP_DMA]) {
> +			igt_dynamic_f("amdgpu-deadlock-sdma-slow-linear-copy")
> +			amdgpu_hang_sdma_helper(device, DMA_SLOW_LINEARCOPY_HANG);
> +		}
> +	}
> +
>  	igt_fixture {
>  		amdgpu_device_deinitialize(device);
>  		drm_close_driver(fd);
> diff --git a/tests/amdgpu/amd_security.c b/tests/amdgpu/amd_security.c
> index 46180df2e..1a7eba9eb 100644
> --- a/tests/amdgpu/amd_security.c
> +++ b/tests/amdgpu/amd_security.c
> @@ -110,7 +110,7 @@ amdgpu_bo_lcopy(amdgpu_device_handle device,
>  
>  	amdgpu_sdma_lcopy(ring_context->pm4, ring_context->bo_mc2,
>  			ring_context->bo_mc, size, secure);
> -	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  	free(ring_context->pm4);
>  }
>  
> @@ -155,7 +155,7 @@ amdgpu_bo_move(amdgpu_device_handle device, int fd,
>  	 * it to the desired location.
>  	 */
>  	amdgpu_sdma_nop(ring_context->pm4, PACKET_NOP_SIZE);
> -	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
> +	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
>  	free(ring_context->pm4);
>  }
>  

[-- Attachment #2: Type: text/html, Size: 17129 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA
@ 2023-10-23  2:04 Jesse Zhang
  2023-10-24  5:17 ` [igt-dev] ✗ Fi.CI.BAT: failure for tests/amd_dispatch: add negative test for SDMA (rev4) Patchwork
  0 siblings, 1 reply; 8+ messages in thread
From: Jesse Zhang @ 2023-10-23  2:04 UTC (permalink / raw)
  To: igt-dev; +Cc: Tim Huang, Luben Tuikov, Alex Deucher, Christian Koenig

Issue corrupted header or slow sdma linear copy
to trigger SDMA hang test.

V4:
  - avoid generating warning,
    and optimize logical code. (Vitaly)
  - Use existing interfaces
    to clean up code. (Vitaly)
  - add define and additional
    comments for clarification. (Vitaly)

Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
Cc: Luben Tuikov <luben.tuikov@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Kamil Konieczny <kamil.konieczny@linux.intel.com>

Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
Signed-off-by: Tim Huang <tim.huang@amd.com>
Reviewed-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
---
 lib/amdgpu/amd_command_submission.c | 46 ++++++++------
 lib/amdgpu/amd_command_submission.h |  3 +-
 lib/amdgpu/amd_deadlock_helpers.c   | 95 +++++++++++++++++++++++++++++
 lib/amdgpu/amd_deadlock_helpers.h   |  7 +++
 tests/amdgpu/amd_basic.c            |  4 +-
 tests/amdgpu/amd_deadlock.c         | 16 +++++
 tests/amdgpu/amd_security.c         |  4 +-
 7 files changed, 153 insertions(+), 22 deletions(-)

diff --git a/lib/amdgpu/amd_command_submission.c b/lib/amdgpu/amd_command_submission.c
index 02cf9357b..b674ba640 100644
--- a/lib/amdgpu/amd_command_submission.c
+++ b/lib/amdgpu/amd_command_submission.c
@@ -18,7 +18,7 @@
  */
 
 void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type,
-				struct amdgpu_ring_context *ring_context)
+				struct amdgpu_ring_context *ring_context, int expect)
 {
 	int r;
 	uint32_t expired;
@@ -31,15 +31,23 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
 
 	amdgpu_bo_handle *all_res = alloca(sizeof(ring_context->resources[0]) * (ring_context->res_cnt + 1));
 
+	if (expect) {
+		/* allocate IB */
+		r = amdgpu_bo_alloc_and_map(device, ring_context->write_length, 4096,
+					    AMDGPU_GEM_DOMAIN_GTT, 0,
+					    &ib_result_handle, &ib_result_cpu,
+					    &ib_result_mc_address, &va_handle);
+	} else {
+		/* prepare CS */
+		igt_assert(ring_context->pm4_dw <= 1024);
+		/* allocate IB */
+		r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
+					    AMDGPU_GEM_DOMAIN_GTT, 0,
+					    &ib_result_handle, &ib_result_cpu,
+					    &ib_result_mc_address, &va_handle);
 
-	/* prepare CS */
-	igt_assert(ring_context->pm4_dw <= 1024);
 
-	/* allocate IB */
-	r = amdgpu_bo_alloc_and_map(device, 4096, 4096,
-				    AMDGPU_GEM_DOMAIN_GTT, 0,
-				    &ib_result_handle, &ib_result_cpu,
-				    &ib_result_mc_address, &va_handle);
+	}
 	igt_assert_eq(r, 0);
 
 	/* copy PM4 packet to ring from caller */
@@ -81,9 +89,13 @@ void amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_typ
 	r = amdgpu_cs_query_fence_status(&fence_status,
 					 AMDGPU_TIMEOUT_INFINITE,
 					 0, &expired);
-	igt_assert_eq(r, 0);
-	igt_assert_eq(expired, true);
-
+	if (expect) {
+		igt_assert_neq(r, 0);
+		igt_assert_neq(expired, true);
+	} else {
+		igt_assert_eq(r, 0);
+		igt_assert_eq(expired, true);
+	}
 	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
 				 ib_result_mc_address, 4096);
 }
@@ -145,7 +157,7 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
 
 			ring_context->ring_id = ring_id;
 
-			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+			 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 			/* verify if SDMA test result meets with expected */
 			i = 0;
@@ -155,20 +167,20 @@ void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
 			} else if (ip_block->type == AMDGPU_HW_IP_GFX) {
 				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 			} else if (ip_block->type == AMDGPU_HW_IP_DMA) {
 				/* restore the bo_cpu to compare */
 				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
 				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 				/* restore again, here dest_data should be */
 				ring_context->bo_cpu_origin = ring_context->bo_cpu[0];
 				ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 				/* here bo_cpu[0] should be unchanged, still is 0x12345678, otherwise failed*/
 				igt_assert_eq(ring_context->bo_cpu[0], ring_context->bo_cpu_origin);
 			}
@@ -236,7 +248,7 @@ void amdgpu_command_submission_const_fill_helper(amdgpu_device_handle device,
 			/* fulfill PM4: test DMA const fill */
 			ip_block->funcs->const_fill(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+			amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 			/* verify if SDMA test result meets with expected */
 			r = ip_block->funcs->compare(ip_block->funcs, ring_context, 4);
@@ -322,7 +334,7 @@ void amdgpu_command_submission_copy_linear_helper(amdgpu_device_handle device,
 
 				ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+				amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 				/* verify if SDMA test result meets with expected */
 				r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, 4);
diff --git a/lib/amdgpu/amd_command_submission.h b/lib/amdgpu/amd_command_submission.h
index 58f3221a3..44f0cc958 100644
--- a/lib/amdgpu/amd_command_submission.h
+++ b/lib/amdgpu/amd_command_submission.h
@@ -29,7 +29,8 @@
 #include "amd_ip_blocks.h"
 
 void amdgpu_test_exec_cs_helper(amdgpu_device_handle device,
-				unsigned int ip_type, struct amdgpu_ring_context *ring_context);
+				unsigned int ip_type, struct amdgpu_ring_context *ring_context,
+				int expect);
 
 void amdgpu_command_submission_write_linear_helper(amdgpu_device_handle device,
 						   const struct amdgpu_ip_block_version *ip_block,
diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
index a6be5f02a..612f127fd 100644
--- a/lib/amdgpu/amd_deadlock_helpers.c
+++ b/lib/amdgpu/amd_deadlock_helpers.c
@@ -13,6 +13,7 @@
 #include "amd_memory.h"
 #include "amd_deadlock_helpers.h"
 #include "amd_ip_blocks.h"
+#include "lib/amdgpu/amd_command_submission.h"
 
 #define MAX_JOB_COUNT 200
 
@@ -248,3 +249,97 @@ bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned i
 	free_cmd_base(base_cmd);
 	amdgpu_cs_ctx_free(context_handle);
 }
+
+#define MAX_DMABUF_COUNT 0x20000
+#define MAX_DWORD_COUNT 256
+
+void
+amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type)
+{
+	int j, r;
+	uint32_t *ptr, offset;
+	struct amdgpu_ring_context *ring_context;
+	struct amdgpu_cmd_base *base_cmd = get_cmd_base();
+	const struct amdgpu_ip_block_version *ip_block = get_ip_block(device_handle, AMDGPU_HW_IP_DMA);
+
+	ring_context = calloc(1, sizeof(*ring_context));
+	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
+		ring_context->write_length = 4096;
+		ring_context->pm4 = calloc(MAX_DWORD_COUNT, sizeof(*ring_context->pm4));
+		ring_context->pm4_size = MAX_DWORD_COUNT;
+	} else {
+		ring_context->write_length = MAX_DWORD_COUNT * 4 * MAX_DMABUF_COUNT;
+		ring_context->pm4 = calloc(MAX_DWORD_COUNT * MAX_DMABUF_COUNT, sizeof(*ring_context->pm4));
+		ring_context->pm4_size = MAX_DWORD_COUNT * MAX_DMABUF_COUNT;
+	}
+	ring_context->secure = false;
+	ring_context->res_cnt = 2;
+	igt_assert(ring_context->pm4);
+
+	r = amdgpu_cs_ctx_create(device_handle, &ring_context->context_handle);
+	igt_assert_eq(r, 0);
+
+	r = amdgpu_bo_alloc_and_map(device_handle, ring_context->write_length, 4096,
+					AMDGPU_GEM_DOMAIN_GTT, 0,
+					&ring_context->bo, (void **)&ring_context->bo_cpu,
+					&ring_context->bo_mc, &ring_context->va_handle);
+	igt_assert_eq(r, 0);
+
+	/* set bo */
+	memset((void *)ring_context->bo_cpu, 0, ring_context->write_length);
+	r = amdgpu_bo_alloc_and_map(device_handle,
+				    ring_context->write_length, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT,
+				    0, &ring_context->bo2,
+				    (void **)&ring_context->bo2_cpu, &ring_context->bo_mc2,
+				    &ring_context->va_handle2);
+	igt_assert_eq(r, 0);
+
+	/* set bo2 */
+	memset((void *)ring_context->bo2_cpu, 0, ring_context->write_length);
+	ring_context->resources[0] = ring_context->bo;
+	ring_context->resources[1] = ring_context->bo2;
+	base_cmd->attach_buf(base_cmd, ring_context->pm4, ring_context->write_length);
+
+	/* fulfill PM4: with bad copy linear header */
+	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
+		ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
+		base_cmd->emit_at_offset(base_cmd, 0x23decd3d, 0);
+	} else {
+		/* Save initialization pm4 */
+		ptr = ring_context->pm4;
+		for (j = 1; j < MAX_DMABUF_COUNT; j++) {
+			/* copy from buf1 to buf2 */
+			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
+			ring_context->pm4 += ring_context->pm4_dw;
+			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
+
+			offset = ring_context->pm4_dw * 2 * j;
+			/* override  addr of buf1 and buf 2 in order to copy from buf2 to buf1 */
+			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc2), (offset - 4));
+			base_cmd->emit_at_offset(base_cmd,
+					((0xffffffff00000000 & ring_context->bo_mc2) >> 32), (offset - 3));
+			base_cmd->emit_at_offset(base_cmd, (0xffffffff & ring_context->bo_mc), (offset - 2));
+			base_cmd->emit_at_offset(base_cmd,
+					((0xffffffff00000000 & ring_context->bo_mc) >> 32), (offset - 1));
+			ring_context->pm4 += ring_context->pm4_dw;
+		}
+		/* restore pm4 */
+		ring_context->pm4 = ptr;
+		/* update the total pm4_dw */
+		ring_context->pm4_dw = ring_context->pm4_dw * 2 * j;
+	}
+
+	amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 1);
+	amdgpu_bo_unmap_and_free(ring_context->bo, ring_context->va_handle, ring_context->bo_mc,
+						 ring_context->write_length);
+	amdgpu_bo_unmap_and_free(ring_context->bo2, ring_context->va_handle2, ring_context->bo_mc2,
+						 ring_context->write_length);
+	/* clean resources */
+	free(ring_context->pm4);
+	/* end of test */
+	//r = amdgpu_cs_ctx_free(context_handle);
+	r = amdgpu_cs_ctx_free(ring_context->context_handle);
+	igt_assert_eq(r, 0);
+	free_cmd_base(base_cmd);
+}
diff --git a/lib/amdgpu/amd_deadlock_helpers.h b/lib/amdgpu/amd_deadlock_helpers.h
index cc8eba7f7..9c0d245a9 100644
--- a/lib/amdgpu/amd_deadlock_helpers.h
+++ b/lib/amdgpu/amd_deadlock_helpers.h
@@ -24,11 +24,18 @@
 #ifndef __AMD_DEADLOCK_HELPERS_H__
 #define __AMD_DEADLOCK_HELPERS_H__
 
+enum  hang_type {
+	DMA_CORRUPTED_HEADER_HANG,
+	DMA_SLOW_LINEARCOPY_HANG
+};
+
 void
 amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned ip_type);
 
 void
 bad_access_helper(amdgpu_device_handle device_handle, int reg_access, unsigned ip_type);
 
+void
+amdgpu_hang_sdma_helper(amdgpu_device_handle device_handle, uint8_t hang_type);
 #endif
 
diff --git a/tests/amdgpu/amd_basic.c b/tests/amdgpu/amd_basic.c
index 88fdbd980..70e45649d 100644
--- a/tests/amdgpu/amd_basic.c
+++ b/tests/amdgpu/amd_basic.c
@@ -307,7 +307,7 @@ static void amdgpu_userptr_test(amdgpu_device_handle device)
 
 	ip_block->funcs->write_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
 
-	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+	 amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 
 	r = ip_block->funcs->compare(ip_block->funcs, ring_context, 1);
 	igt_assert_eq(r, 0);
@@ -412,7 +412,7 @@ amdgpu_bo_eviction_test(amdgpu_device_handle device_handle)
 			ring_context->resources[2] = ring_context->boa_vram[loop2];
 			ring_context->resources[3] = ring_context->boa_gtt[loop2];
 			ip_block->funcs->copy_linear(ip_block->funcs, ring_context, &ring_context->pm4_dw);
-			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context);
+			amdgpu_test_exec_cs_helper(device_handle, ip_block->type, ring_context, 0);
 			/* fulfill PM4: test DMA copy linear */
 			r = ip_block->funcs->compare_pattern(ip_block->funcs, ring_context, sdma_write_length);
 			igt_assert_eq(r, 0);
diff --git a/tests/amdgpu/amd_deadlock.c b/tests/amdgpu/amd_deadlock.c
index 6147b7636..dc7ec4366 100644
--- a/tests/amdgpu/amd_deadlock.c
+++ b/tests/amdgpu/amd_deadlock.c
@@ -77,6 +77,22 @@ igt_main
 		}
 	}
 
+	igt_describe("Test-GPU-reset-by-sdma-corrupted-header-with-jobs");
+	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-corrupted-header-test") {
+		if (arr_cap[AMD_IP_DMA]) {
+			igt_dynamic_f("amdgpu-deadlock-sdma-corrupted-header-test")
+			amdgpu_hang_sdma_helper(device, DMA_CORRUPTED_HEADER_HANG);
+		}
+	}
+
+	igt_describe("Test-GPU-reset-by-sdma-slow-linear-copy-with-jobs");
+	igt_subtest_with_dynamic("amdgpu-deadlock-sdma-slow-linear-copy") {
+		if (arr_cap[AMD_IP_DMA]) {
+			igt_dynamic_f("amdgpu-deadlock-sdma-slow-linear-copy")
+			amdgpu_hang_sdma_helper(device, DMA_SLOW_LINEARCOPY_HANG);
+		}
+	}
+
 	igt_fixture {
 		amdgpu_device_deinitialize(device);
 		drm_close_driver(fd);
diff --git a/tests/amdgpu/amd_security.c b/tests/amdgpu/amd_security.c
index 46180df2e..1a7eba9eb 100644
--- a/tests/amdgpu/amd_security.c
+++ b/tests/amdgpu/amd_security.c
@@ -110,7 +110,7 @@ amdgpu_bo_lcopy(amdgpu_device_handle device,
 
 	amdgpu_sdma_lcopy(ring_context->pm4, ring_context->bo_mc2,
 			ring_context->bo_mc, size, secure);
-	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 	free(ring_context->pm4);
 }
 
@@ -155,7 +155,7 @@ amdgpu_bo_move(amdgpu_device_handle device, int fd,
 	 * it to the desired location.
 	 */
 	amdgpu_sdma_nop(ring_context->pm4, PACKET_NOP_SIZE);
-	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context);
+	amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context, 0);
 	free(ring_context->pm4);
 }
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [igt-dev] ✗ Fi.CI.BAT: failure for tests/amd_dispatch: add negative test for SDMA (rev4)
  2023-10-23  2:04 [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA Jesse Zhang
@ 2023-10-24  5:17 ` Patchwork
  0 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2023-10-24  5:17 UTC (permalink / raw)
  To: Jesse Zhang; +Cc: igt-dev

[-- Attachment #1: Type: text/plain, Size: 245 bytes --]

== Series Details ==

Series: tests/amd_dispatch: add negative test for SDMA (rev4)
URL   : https://patchwork.freedesktop.org/series/125215/
State : failure

== Summary ==

Series 125215 revision 4 was fully merged or fully failed: no git log



[-- Attachment #2: Type: text/html, Size: 714 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2023-10-24  5:17 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-10-23  2:04 [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA Jesse Zhang
2023-10-24  5:17 ` [igt-dev] ✗ Fi.CI.BAT: failure for tests/amd_dispatch: add negative test for SDMA (rev4) Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2023-10-20  5:21 [igt-dev] [PATCH] tests/amd_dispatch: add negative test for SDMA Jesse Zhang
2023-10-20 11:37 ` Kamil Konieczny
2023-10-20 16:08   ` vitaly prosyak
2023-10-22  3:42 ` vitaly prosyak
2023-10-18  5:33 Jesse Zhang
2023-10-19  2:39 ` vitaly prosyak

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox