Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Sunil Khatri <sunil.khatri@amd.com>
To: igt-dev@lists.freedesktop.org
Cc: "Alex Deucher" <alexander.deucher@amd.com>,
	"Christian König" <christian.koenig@amd.com>,
	"Vitaly Prosyak" <vitaly.prosyak@amd.com>,
	"Sunil Khatri" <sunil.khatri@amd.com>
Subject: [PATCH v3 08/19] tests/amdgpu: Add amdgpu_sync_dependency_test with UMQ
Date: Fri, 28 Mar 2025 13:54:05 +0530	[thread overview]
Message-ID: <20250328082416.1469810-8-sunil.khatri@amd.com> (raw)
In-Reply-To: <20250328082416.1469810-1-sunil.khatri@amd.com>

Add UMQ support to amdgpu_sync_dependency_test and add a
new test case that runs this test with UMQ submission.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
---
 tests/amdgpu/amd_basic.c | 95 ++++++++++++++++++++++++++++++----------
 1 file changed, 72 insertions(+), 23 deletions(-)

diff --git a/tests/amdgpu/amd_basic.c b/tests/amdgpu/amd_basic.c
index 2b339c74b..643a147f5 100644
--- a/tests/amdgpu/amd_basic.c
+++ b/tests/amdgpu/amd_basic.c
@@ -475,7 +475,7 @@ amdgpu_bo_eviction_test(amdgpu_device_handle device_handle)
 }
 
 static void
-amdgpu_sync_dependency_test(amdgpu_device_handle device_handle)
+amdgpu_sync_dependency_test(amdgpu_device_handle device_handle, bool user_queue)
 {
 	const unsigned const_size = 8192;
 	const unsigned const_alignment = 4096;
@@ -498,25 +498,44 @@ amdgpu_sync_dependency_test(amdgpu_device_handle device_handle)
 
 	uint32_t size_bytes, code_offset, data_offset;
 	const uint32_t *shader;
+	struct amdgpu_ring_context *ring_context;
 
 	struct amdgpu_cmd_base *base = get_cmd_base();
 	const struct amdgpu_ip_block_version *ip_block = get_ip_block(device_handle, AMD_IP_GFX);
 
-	r = amdgpu_cs_ctx_create(device_handle, &context_handle[0]);
-	igt_assert_eq(r, 0);
-	r = amdgpu_cs_ctx_create(device_handle, &context_handle[1]);
-	igt_assert_eq(r, 0);
+	ring_context = calloc(1, sizeof(*ring_context));
+	igt_assert(ring_context);
 
-	r = amdgpu_bo_alloc_and_map(device_handle, const_size, const_alignment,
-			AMDGPU_GEM_DOMAIN_GTT, 0,
-			&ib_result_handle, &ib_result_cpu,
-			&ib_result_mc_address, &va_handle);
+	if (user_queue) {
+		amdgpu_user_queue_create(device_handle, ring_context, ip_block->type);
+	} else {
+		r = amdgpu_cs_ctx_create(device_handle, &context_handle[0]);
+		igt_assert_eq(r, 0);
+
+		r = amdgpu_cs_ctx_create(device_handle, &context_handle[1]);
+		igt_assert_eq(r, 0);
+	}
+
+	r = amdgpu_bo_alloc_and_map_sync(device_handle, const_size,
+					 const_alignment, AMDGPU_GEM_DOMAIN_GTT, 0,
+					 AMDGPU_VM_MTYPE_UC,
+					 &ib_result_handle, &ib_result_cpu,
+					 &ib_result_mc_address, &va_handle,
+					 ring_context->timeline_syncobj_handle,
+					 ++ring_context->point, user_queue);
 
 	igt_assert_eq(r, 0);
 
-	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
+	if (user_queue) {
+		r = amdgpu_timeline_syncobj_wait(device_handle,
+						 ring_context->timeline_syncobj_handle,
+						 ring_context->point);
+		igt_assert_eq(r, 0);
+	} else {
+		r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
 			       &bo_list);
-	igt_assert_eq(r, 0);
+		igt_assert_eq(r, 0);
+	}
 
 	shader = get_shader_bin(&size_bytes, &code_offset, &data_offset);
 
@@ -585,7 +604,14 @@ amdgpu_sync_dependency_test(amdgpu_device_handle device_handle)
 	ibs_request.resources = bo_list;
 	ibs_request.fence_info.handle = NULL;
 
-	r = amdgpu_cs_submit(context_handle[1], 0, &ibs_request, 1);
+	if (user_queue) {
+		ring_context->pm4_dw = ib_info.size;
+		amdgpu_user_queue_submit(device_handle, ring_context, ip_block->type,
+					 ib_result_mc_address);
+	} else {
+		r = amdgpu_cs_submit(context_handle[1], 0, &ibs_request, 1);
+	}
+
 	igt_assert_eq(r, 0);
 	seq_no = ibs_request.seq_no;
 
@@ -618,8 +644,14 @@ amdgpu_sync_dependency_test(amdgpu_device_handle device_handle)
 	ibs_request.dependencies[0].ring = 0;
 	ibs_request.dependencies[0].fence = seq_no;
 
-	r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request, 1);
-	igt_assert_eq(r, 0);
+	if (user_queue) {
+		ring_context->pm4_dw = ib_info.size;
+		amdgpu_user_queue_submit(device_handle, ring_context, ip_block->type,
+					ib_info.ib_mc_address);
+	} else {
+		r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request, 1);
+		igt_assert_eq(r, 0);
+	}
 
 	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
 	fence_status.context = context_handle[0];
@@ -628,24 +660,33 @@ amdgpu_sync_dependency_test(amdgpu_device_handle device_handle)
 	fence_status.ring = 0;
 	fence_status.fence = ibs_request.seq_no;
 
-	r = amdgpu_cs_query_fence_status(&fence_status,
+	if (!user_queue) {
+		r = amdgpu_cs_query_fence_status(&fence_status,
 		       AMDGPU_TIMEOUT_INFINITE, 0, &expired);
-	igt_assert_eq(r, 0);
+		igt_assert_eq(r, 0);
+	}
 
 	/* Expect the second command to wait for shader to complete */
 	igt_assert_eq(base->buf[data_offset], 99);
 
-	r = amdgpu_bo_list_destroy(bo_list);
-	igt_assert_eq(r, 0);
+	if (!user_queue) {
+		r = amdgpu_bo_list_destroy(bo_list);
+		igt_assert_eq(r, 0);
+	}
 
-	 amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
-				     ib_result_mc_address, const_alignment);
+	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+				 ib_result_mc_address, const_alignment);
 
-	amdgpu_cs_ctx_free(context_handle[0]);
-	amdgpu_cs_ctx_free(context_handle[1]);
+	if (user_queue) {
+		amdgpu_user_queue_destroy(device_handle, ring_context, ip_block->type);
+	} else {
+		amdgpu_cs_ctx_free(context_handle[0]);
+		amdgpu_cs_ctx_free(context_handle[1]);
+	}
 
 	free(ibs_request.dependencies);
 	free_cmd_base(base);
+	free(ring_context);
 }
 
 igt_main
@@ -743,7 +784,7 @@ igt_main
 	igt_subtest_with_dynamic("sync-dependency-test-with-IP-GFX") {
 		if (arr_cap[AMD_IP_GFX]) {
 			igt_dynamic_f("sync-dependency-test")
-			amdgpu_sync_dependency_test(device);
+			amdgpu_sync_dependency_test(device, false);
 		}
 	}
 
@@ -763,6 +804,14 @@ igt_main
 		}
 	}
 
+	igt_describe("Check-sync-dependency-using-GFX-ring");
+	igt_subtest_with_dynamic("sync-dependency-test-with-IP-GFX-UMQ") {
+		if (arr_cap[AMD_IP_GFX]) {
+			igt_dynamic_f("sync-dependency-test-with-umq")
+			amdgpu_sync_dependency_test(device, true);
+		}
+	}
+
 	igt_fixture {
 		amdgpu_device_deinitialize(device);
 		drm_close_driver(fd);
-- 
2.43.0


  parent reply	other threads:[~2025-03-28  8:24 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-03-28  8:23 [PATCH v3 01/19] drm-uapi/amdgpu: sync with drm-next Sunil Khatri
2025-03-28  8:23 ` [PATCH v3 02/19] " Sunil Khatri
2025-03-31 19:11   ` vitaly prosyak
2025-04-01  4:39     ` Khatri, Sunil
2025-04-01  4:50       ` vitaly prosyak
2025-04-01  5:46         ` Khatri, Sunil
2025-04-01 16:09   ` Kamil Konieczny
2025-03-28  8:24 ` [PATCH v3 03/19] lib/amdgpu: Add user mode queue support in ring context Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 04/19] lib/amdgpu: Add support of amd user queues Sunil Khatri
2025-04-01  4:21   ` vitaly prosyak
2025-04-01  4:41     ` Khatri, Sunil
2025-03-28  8:24 ` [PATCH v3 05/19] lib/amdgpu: add func amdgpu_bo_alloc_and_map_sync Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 06/19] tests/amdgpu: Add user queue support for gfx and compute Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 07/19] tests/amdgpu: Add UMQ submission tests " Sunil Khatri
2025-03-28  8:24 ` Sunil Khatri [this message]
2025-03-28  8:24 ` [PATCH v3 09/19] tests/amdgpu: use memory API's from amd_memory.h Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 10/19] lib/amdgpu: add macro for adding cmds in user queue Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 11/19] lib/amdgpu: use macro to add cmds in the user ring Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 12/19] tests/amdgpu: Add amdgpu_cp_nops tests for UMQ Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 13/19] drm-uapi/amdgpu: sync with drm-next Sunil Khatri
2025-04-01 16:06   ` Kamil Konieczny
2025-04-01 23:52     ` vitaly prosyak
2025-04-02 10:51       ` Kamil Konieczny
2025-04-01 23:57     ` vitaly prosyak
2025-03-28  8:24 ` [PATCH v3 14/19] lib/amdgpu: use right API to get the correct size Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 15/19] lib/amdgpu: use a memory fence to serialize write Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 16/19] tests/amdgpu: disable check for IP presence with no kernel queue Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 17/19] lib/amdgpu: make the local functions as static Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 18/19] lib/amdgpu: enable UMQ function under macro Sunil Khatri
2025-03-28  8:24 ` [PATCH v3 19/19] tests/amdgpu: Disable the UMQ tests under a macro Sunil Khatri
2025-03-28 13:01 ` ✓ Xe.CI.BAT: success for series starting with [v3,01/19] drm-uapi/amdgpu: sync with drm-next Patchwork
2025-03-28 13:12 ` ✗ i915.CI.BAT: failure " Patchwork
2025-03-29  0:43 ` ✗ Xe.CI.Full: " Patchwork
2025-04-01 23:46 ` [PATCH v3 01/19] " vitaly prosyak
2025-04-06 18:47 ` ✗ Xe.CI.Full: failure for series starting with [v3,01/19] " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250328082416.1469810-8-sunil.khatri@amd.com \
    --to=sunil.khatri@amd.com \
    --cc=alexander.deucher@amd.com \
    --cc=christian.koenig@amd.com \
    --cc=igt-dev@lists.freedesktop.org \
    --cc=vitaly.prosyak@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox