[PATCH] lib/amdgpu: Add support for page queues in amd_deadlock

vitaly.prosyak at amd.com vitaly.prosyak at amd.com
Mon Feb 24 19:30:52 UTC 2025


From: "Jesse.zhang at amd.com" <Jesse.zhang at amd.com>

This commit enhances deadlock handling by adding support
for page queues and refining the logic for enabling and
disabling scheduling rings.

1. New Function is_support_page_queue:
   - Checks for the existence of page queue files for a given
     IP block type and PCI address.

2. Updated amdgpu_wait_memory_helper:
   - Adjusts scheduling ring logic based on page queue support.
   - Calls is_support_page_queue to determine page queue availability.
   - Enables two rings (SDMA GFX queue and page queue) if supported.

3. Similar Updates to Other Functions:
    - Applies consistent page queue handling in
      bad_access_ring_helper and amdgpu_hang_sdma_ring_helper.
    - Ensures uniform logic across all relevant helper functions.

V2: Move is_support_page_queue into the amd_ip_block file to
    allow other test cases to access this function,
    and rephrase the commit message (Vitaly)

Cc: Vitaly Prosyak <vitaly.prosyak at amd.com>
Cc: Christian Koenig <christian.koenig at amd.com>
Cc: Alexander Deucher <alexander.deucher at amd.com>

Signed-off-by: Jesse Zhang <jesse.zhang at amd.com>
Reviewed-by: Vitaly Prosyak <vitaly.prosyak at amd.com>
---
 lib/amdgpu/amd_deadlock_helpers.c | 70 ++++++++++++++++++++++++-------
 lib/amdgpu/amd_ip_blocks.c        | 27 ++++++++++++
 lib/amdgpu/amd_ip_blocks.h        |  3 ++
 3 files changed, 85 insertions(+), 15 deletions(-)

diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
index d7bf0e111..c8a48930e 100644
--- a/lib/amdgpu/amd_deadlock_helpers.c
+++ b/lib/amdgpu/amd_deadlock_helpers.c
@@ -179,16 +179,19 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
 	FILE *fp;
 	char cmd[1024];
 	char buffer[128];
-	long sched_mask = 0;
+	uint64_t sched_mask = 0, ring_id;
 	struct drm_amdgpu_info_hw_ip info;
-	uint32_t ring_id, prio;
+	uint32_t  prio;
 	char sysfs[125];
+	bool support_page;
 
 	r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
 	igt_assert_eq(r, 0);
 	if (!info.available_rings)
 		igt_info("SKIP ... as there's no ring for ip %d\n", ip_type);
 
+	support_page = is_support_page_queue(ip_type, pci);
+
 	if (ip_type == AMD_IP_GFX)
 		snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_gfx_sched_mask",
 			pci->domain, pci->bus, pci->device, pci->function);
@@ -215,7 +218,7 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
 		igt_info("The scheduling ring only enables one for ip %d\n", ip_type);
 	}
 
-	for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
+	for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id += 1) {
 		/* check sched is ready is on the ring. */
 		if (!((1 << ring_id) & sched_mask))
 			continue;
@@ -239,9 +242,20 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
 		}
 
 		if (sched_mask > 1) {
-			snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
-						0x1 << ring_id, sysfs);
-			igt_info("Disable other rings, keep only ring: %d enabled, cmd: %s\n", ring_id, cmd);
+			/* If page queues are supported, run with
+			 * multiple queues(sdma gfx queue + page queue)
+			 */
+			if (support_page) {
+				snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
+							0x3 << ring_id, sysfs);
+				igt_info("Disable other rings, keep ring: %ld and %ld enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
+				ring_id++;
+
+			} else {
+				snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
+							0x1 << ring_id, sysfs);
+				igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
+			}
 			r = system(cmd);
 			igt_assert_eq(r, 0);
 		}
@@ -411,16 +425,18 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
 	FILE *fp;
 	char cmd[1024];
 	char buffer[128];
-	long sched_mask = 0;
+	uint64_t sched_mask = 0, ring_id;
 	struct drm_amdgpu_info_hw_ip info;
-	uint32_t ring_id, prio;
+	uint32_t prio;
 	char sysfs[125];
+	bool support_page;
 
 	r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
 	igt_assert_eq(r, 0);
 	if (!info.available_rings)
 		igt_info("SKIP ... as there's no ring for ip %d\n", ip_type);
 
+	support_page = is_support_page_queue(ip_type, pci);
 	if (ip_type == AMD_IP_GFX)
 		snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_gfx_sched_mask",
 			pci->domain, pci->bus, pci->device, pci->function);
@@ -447,7 +463,7 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
 		igt_info("The scheduling ring only enables one for ip %d\n", ip_type);
 	}
 
-	for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
+	for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id++) {
 		/* check sched is ready is on the ring. */
 		if (!((1 << ring_id) & sched_mask))
 			continue;
@@ -471,9 +487,20 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
 		}
 
 		if (sched_mask > 1) {
-			snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
+			/* If page queues are supported, run with
+			 * multiple queues(sdma gfx queue + page queue)
+			 */
+			if (support_page) {
+				snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
+						0x3 << ring_id, sysfs);
+				igt_info("Disable other rings, keep ring: %ld and %ld enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
+				ring_id++;
+			} else {
+				snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
 						0x1 << ring_id, sysfs);
-			igt_info("Disable other rings, keep only ring: %d enabled, cmd: %s\n", ring_id, cmd);
+				igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
+			}
+
 			r = system(cmd);
 			igt_assert_eq(r, 0);
 		}
@@ -496,16 +523,17 @@ void amdgpu_hang_sdma_ring_helper(amdgpu_device_handle device_handle, uint8_t ha
 	FILE *fp;
 	char cmd[1024];
 	char buffer[128];
-	long sched_mask = 0;
+	uint64_t sched_mask = 0, ring_id;
 	struct drm_amdgpu_info_hw_ip info;
-	uint32_t ring_id;
 	char sysfs[125];
+	bool support_page;
 
 	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
 	igt_assert_eq(r, 0);
 	if (!info.available_rings)
 		igt_info("SKIP ... as there's no ring for the sdma\n");
 
+	support_page = is_support_page_queue(AMDGPU_HW_IP_DMA, pci);
 	snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_sdma_sched_mask",
 			pci->domain, pci->bus, pci->device, pci->function);
 	snprintf(cmd, sizeof(cmd) - 1, "sudo cat %s", sysfs);
@@ -522,14 +550,26 @@ void amdgpu_hang_sdma_ring_helper(amdgpu_device_handle device_handle, uint8_t ha
 	} else
 		sched_mask = 1;
 
-	for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
+	for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id++) {
 		/* check sched is ready is on the ring. */
 		if (!((1 << ring_id) & sched_mask))
 			continue;
 
 		if (sched_mask > 1) {
-			snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
+			/* If page queues are supported, run with
+			 * multiple queues(sdma gfx queue + page queue)
+			 */
+			if (support_page) {
+				snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
+						0x3 << ring_id, sysfs);
+				igt_info("Disable other rings, keep ring: %ld and %ld enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
+				ring_id++;
+			} else {
+				snprintf(cmd, sizeof(cmd) - 1, "sudo echo  0x%x > %s",
 						0x1 << ring_id, sysfs);
+				igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
+			}
+
 			r = system(cmd);
 			igt_assert_eq(r, 0);
 		}
diff --git a/lib/amdgpu/amd_ip_blocks.c b/lib/amdgpu/amd_ip_blocks.c
index 7c71a8952..4eaf639b3 100644
--- a/lib/amdgpu/amd_ip_blocks.c
+++ b/lib/amdgpu/amd_ip_blocks.c
@@ -6,6 +6,7 @@
  */
 
 #include <fcntl.h>
+#include <glob.h>
 
 #include "amd_memory.h"
 #include "amd_ip_blocks.h"
@@ -1132,3 +1133,29 @@ int get_pci_addr_from_fd(int fd, struct pci_addr *pci)
 
 	return 0;
 }
+
+/*
+ * Function to check if page queue files exist for a given IP block type and PCI address
+ */
+bool is_support_page_queue(enum amd_ip_block_type ip_type, const struct pci_addr *pci)
+{
+	glob_t glob_result;
+	bool found;
+	char search_pattern[1024];
+
+	/* Page queues only exist for the SDMA IP block */
+	if (ip_type != AMD_IP_DMA)
+		return false;
+
+	/* Build the debugfs pattern matching SDMA page-queue ring files */
+	snprintf(search_pattern, sizeof(search_pattern) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_ring_page*",
+		pci->domain, pci->bus, pci->device, pci->function);
+
+	/* Capture the result before globfree(): POSIX leaves the glob_t
+	 * contents unspecified after globfree(), so gl_pathc must be
+	 * read while the structure is still valid.
+	 */
+	found = (glob(search_pattern, GLOB_NOSORT, NULL, &glob_result) == 0 &&
+		 glob_result.gl_pathc > 0);
+	globfree(&glob_result);
+	return found;
+}
diff --git a/lib/amdgpu/amd_ip_blocks.h b/lib/amdgpu/amd_ip_blocks.h
index 595b2b16b..fc9df6c78 100644
--- a/lib/amdgpu/amd_ip_blocks.h
+++ b/lib/amdgpu/amd_ip_blocks.h
@@ -237,4 +237,7 @@ is_reset_enable(enum amd_ip_block_type ip_type, uint32_t reset_type, const struc
 
 int
 get_pci_addr_from_fd(int fd, struct pci_addr *pci);
+
+bool
+is_support_page_queue(enum amd_ip_block_type ip_type, const struct pci_addr *pci);
 #endif
-- 
2.34.1



More information about the igt-dev mailing list