From: "Jesse.zhang@amd.com" <jesse.zhang@amd.com>
To: <igt-dev@lists.freedesktop.org>
Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>,
Alex Deucher <alexander.deucher@amd.com>,
Christian Koenig <christian.koenig@amd.com>,
"Jesse.zhang@amd.com" <jesse.zhang@amd.com>
Subject: [PATCH i-g-t] lib/amdgpu: add support for page queues in amd_deadlock
Date: Mon, 24 Feb 2025 15:23:16 +0800 [thread overview]
Message-ID: <20250224072316.4117581-1-jesse.zhang@amd.com> (raw)
This commit introduces enhancements to the deadlock helpers to handle
page queues and modifies the logic for enabling/disabling scheduling rings.
- New Function `is_support_page_queue`:
- Checks if page queue files exist for a given IP block type and PCI address.
- Modify `amdgpu_wait_memory_helper`:
- Updates the logic for enabling/disabling scheduling rings based on whether page queues are supported.
- Calls `is_support_page_queue` to check if page queues are supported.
- If page queues are supported, enables two rings (sdma gfx queue and page queue).
- Similar Modifications in Other Functions:
- Applies similar logic to handle page queues in `bad_access_ring_helper` and `amdgpu_hang_sdma_ring_helper`.
- Ensures consistency across different helper functions, maintaining the same logic for handling page queues.
Cc: Vitaly Prosyak <vitaly.prosyak@amd.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Alexander Deucher <alexander.deucher@amd.com>
Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
---
lib/amdgpu/amd_deadlock_helpers.c | 96 ++++++++++++++++++++++++++-----
1 file changed, 81 insertions(+), 15 deletions(-)
diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
index d7bf0e111..3463653a7 100644
--- a/lib/amdgpu/amd_deadlock_helpers.c
+++ b/lib/amdgpu/amd_deadlock_helpers.c
@@ -10,6 +10,7 @@
#include <unistd.h>
#include <pthread.h>
#include <signal.h>
+#include <glob.h>
#include "amd_memory.h"
#include "amd_deadlock_helpers.h"
#include "lib/amdgpu/amd_command_submission.h"
@@ -26,6 +27,31 @@ struct thread_param {
static int
use_uc_mtype = 1;
+/* Function to check if page queue files exist for a given IP block type and PCI address.
+ * Returns true only for SDMA (AMD_IP_DMA) when at least one amdgpu_ring_page* debugfs
+ * entry is present for the device at @pci; false otherwise.
+ */
+static bool
+is_support_page_queue(enum amd_ip_block_type ip_type, const struct pci_addr *pci)
+{
+ glob_t glob_result;
+ int ret;
+ bool found;
+ char search_pattern[1024];
+
+ /* Page queues only exist on SDMA; any other IP type cannot have them */
+ if (ip_type != AMD_IP_DMA)
+ return false;
+
+ /* Construct the search pattern for the page queue debugfs files */
+ snprintf(search_pattern, sizeof(search_pattern) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_ring_page*",
+ pci->domain, pci->bus, pci->device, pci->function);
+
+ /* Use glob to find files matching the pattern */
+ ret = glob(search_pattern, GLOB_NOSORT, NULL, &glob_result);
+ /* Read gl_pathc BEFORE globfree(): glob_t contents are undefined afterwards */
+ found = (ret == 0 && glob_result.gl_pathc > 0);
+ /* Free the memory allocated by glob */
+ globfree(&glob_result);
+ return found;
+}
+
static void*
write_mem_address(void *data)
{
@@ -179,16 +205,19 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
FILE *fp;
char cmd[1024];
char buffer[128];
- long sched_mask = 0;
+ uint64_t sched_mask = 0, ring_id;
struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, prio;
+ uint32_t prio;
char sysfs[125];
+ bool support_page;
r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
igt_assert_eq(r, 0);
if (!info.available_rings)
igt_info("SKIP ... as there's no ring for ip %d\n", ip_type);
+ support_page = is_support_page_queue(ip_type, pci);
+
if (ip_type == AMD_IP_GFX)
snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_gfx_sched_mask",
pci->domain, pci->bus, pci->device, pci->function);
@@ -215,7 +244,7 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
igt_info("The scheduling ring only enables one for ip %d\n", ip_type);
}
- for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
+ for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id += 1) {
/* check sched is ready is on the ring. */
if (!((1 << ring_id) & sched_mask))
continue;
@@ -239,9 +268,20 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
}
if (sched_mask > 1) {
- snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
- 0x1 << ring_id, sysfs);
- igt_info("Disable other rings, keep only ring: %d enabled, cmd: %s\n", ring_id, cmd);
+ /* If page queues are supported, run with
+ * multiple queues(sdma gfx queue + page queue)
+ */
+ if (support_page) {
+ snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
+ 0x3 << ring_id, sysfs);
+ igt_info("Disable other rings, keep ring: %ld and %ld enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
+ ring_id++;
+
+ } else {
+ snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
+ 0x1 << ring_id, sysfs);
+ igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
+ }
r = system(cmd);
igt_assert_eq(r, 0);
}
@@ -411,16 +451,18 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
FILE *fp;
char cmd[1024];
char buffer[128];
- long sched_mask = 0;
+ uint64_t sched_mask = 0, ring_id;
struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, prio;
+ uint32_t prio;
char sysfs[125];
+ bool support_page;
r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
igt_assert_eq(r, 0);
if (!info.available_rings)
igt_info("SKIP ... as there's no ring for ip %d\n", ip_type);
+ support_page = is_support_page_queue(ip_type, pci);
if (ip_type == AMD_IP_GFX)
snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_gfx_sched_mask",
pci->domain, pci->bus, pci->device, pci->function);
@@ -447,7 +489,7 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
igt_info("The scheduling ring only enables one for ip %d\n", ip_type);
}
- for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
+ for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id++) {
/* check sched is ready is on the ring. */
if (!((1 << ring_id) & sched_mask))
continue;
@@ -471,9 +513,20 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
}
if (sched_mask > 1) {
- snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
+ /* If page queues are supported, run with
+ * multiple queues(sdma gfx queue + page queue)
+ */
+ if (support_page) {
+ snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
+ 0x3 << ring_id, sysfs);
+ igt_info("Disable other rings, keep ring: %ld and %ld enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
+ ring_id++;
+ } else {
+ snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
0x1 << ring_id, sysfs);
- igt_info("Disable other rings, keep only ring: %d enabled, cmd: %s\n", ring_id, cmd);
+ igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
+ }
+
r = system(cmd);
igt_assert_eq(r, 0);
}
@@ -496,16 +549,17 @@ void amdgpu_hang_sdma_ring_helper(amdgpu_device_handle device_handle, uint8_t ha
FILE *fp;
char cmd[1024];
char buffer[128];
- long sched_mask = 0;
+ uint64_t sched_mask = 0, ring_id;
struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id;
char sysfs[125];
+ bool support_page;
r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
igt_assert_eq(r, 0);
if (!info.available_rings)
igt_info("SKIP ... as there's no ring for the sdma\n");
+ support_page = is_support_page_queue(AMDGPU_HW_IP_DMA, pci);
snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_sdma_sched_mask",
pci->domain, pci->bus, pci->device, pci->function);
snprintf(cmd, sizeof(cmd) - 1, "sudo cat %s", sysfs);
@@ -522,14 +576,26 @@ void amdgpu_hang_sdma_ring_helper(amdgpu_device_handle device_handle, uint8_t ha
} else
sched_mask = 1;
- for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
+ for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id++) {
/* check sched is ready is on the ring. */
if (!((1 << ring_id) & sched_mask))
continue;
if (sched_mask > 1) {
- snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
+ /* If page queues are supported, run with
+ * multiple queues(sdma gfx queue + page queue)
+ */
+ if (support_page) {
+ snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
+ 0x3 << ring_id, sysfs);
+ igt_info("Disable other rings, keep ring: %ld and %ld enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
+ ring_id++;
+ } else {
+ snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
0x1 << ring_id, sysfs);
+ igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
+ }
+
r = system(cmd);
igt_assert_eq(r, 0);
}
--
2.25.1
next reply other threads:[~2025-02-24 7:23 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-24 7:23 Jesse.zhang@amd.com [this message]
2025-02-24 19:32 ` [PATCH i-g-t] lib/amdgpu: ad support for page queues in amd_deadlock vitaly prosyak
2025-02-25 5:47 ` ✓ Xe.CI.BAT: success for " Patchwork
2025-02-25 6:07 ` ✓ i915.CI.BAT: " Patchwork
2025-02-25 8:04 ` ✗ i915.CI.Full: failure " Patchwork
2025-02-25 10:37 ` ✗ Xe.CI.Full: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250224072316.4117581-1-jesse.zhang@amd.com \
--to=jesse.zhang@amd.com \
--cc=alexander.deucher@amd.com \
--cc=christian.koenig@amd.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=vitaly.prosyak@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox