[PATCH i-g-t] lib/amdgpu: add support for page queues in amd_deadlock
Vitaly Prosyak
vprosyak at amd.com
Mon Feb 24 19:32:35 UTC 2025
The change looks good to me with the following updates: move is_support_page_queue into the amd_ip_block file so that other test cases can access this function, and fix some spelling errors.
Posted the updated change.
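For the move, a minimal sketch of what I have in mind, assuming the shared
prototype ends up in lib/amdgpu/amd_ip_blocks.h (the exact header is an
assumption; only the declaration needs to be visible to other test cases):

    /* lib/amdgpu/amd_ip_blocks.h -- assumed location */
    bool is_support_page_queue(enum amd_ip_block_type ip_type,
                               const struct pci_addr *pci);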
Reviewed-by: Vitaly Prosyak <vitaly.prosyak at amd.com>
On 2025-02-24 02:23, Jesse.zhang at amd.com wrote:
> This commit enhances the deadlock tests to handle page queues and
> modifies the logic for enabling/disabling scheduling rings.
>
> - New Function `is_support_page_queue`:
> - Checks if page queue files exist for a given IP block type and PCI address.
>
> - Modify `amdgpu_wait_memory_helper`:
> - Calls `is_support_page_queue` to check whether page queues are supported.
> - Updates the ring enable/disable logic accordingly.
> - If page queues are supported, enables two rings at a time (the sdma gfx queue and its page queue), as sketched in the note below.
>
> - Similar Modifications in Other Functions:
> - Applies similar logic to handle page queues in `bad_access_ring_helper` and `amdgpu_hang_sdma_ring_helper`.
> - Ensures consistency across different helper functions, maintaining the same logic for handling page queues.
>
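To spell out the enable logic above concretely, the per-iteration mask
selection amounts to the following (a minimal sketch; support_page, ring_id
and sysfs are the names used in the patch, while the <inttypes.h> PRIx64
formatting is my own suggestion, not part of the change):

    uint64_t mask;

    if (support_page)
            /* sdma gfx queue at ring_id plus its page queue at ring_id + 1 */
            mask = (uint64_t)0x3 << ring_id;
    else
            /* only the single queue at ring_id */
            mask = (uint64_t)0x1 << ring_id;
    snprintf(cmd, sizeof(cmd), "sudo echo 0x%" PRIx64 " > %s", mask, sysfs);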
> Cc: Vitaly Prosyak <vitaly.prosyak at amd.com>
> Cc: Christian Koenig <christian.koenig at amd.com>
> Cc: Alexander Deucher <alexander.deucher at amd.com>
>
> Signed-off-by: Jesse Zhang <jesse.zhang at amd.com>
> ---
> lib/amdgpu/amd_deadlock_helpers.c | 96 ++++++++++++++++++++++++++-----
> 1 file changed, 81 insertions(+), 15 deletions(-)
>
> diff --git a/lib/amdgpu/amd_deadlock_helpers.c b/lib/amdgpu/amd_deadlock_helpers.c
> index d7bf0e111..3463653a7 100644
> --- a/lib/amdgpu/amd_deadlock_helpers.c
> +++ b/lib/amdgpu/amd_deadlock_helpers.c
> @@ -10,6 +10,7 @@
> #include <unistd.h>
> #include <pthread.h>
> #include <signal.h>
> +#include <glob.h>
> #include "amd_memory.h"
> #include "amd_deadlock_helpers.h"
> #include "lib/amdgpu/amd_command_submission.h"
> @@ -26,6 +27,31 @@ struct thread_param {
> static int
> use_uc_mtype = 1;
>
> +/* Function to check if page queue files exist for a given IP block type and PCI address */
> +static bool
> +is_support_page_queue(enum amd_ip_block_type ip_type, const struct pci_addr *pci)
> +{
> + glob_t glob_result;
> + int ret;
> + char search_pattern[1024];
> +
> + /* If the IP type is not SDMA, return false */
> + if (ip_type != AMD_IP_DMA)
> + return false;
> +
> + /* Construct the search pattern for the page queue files */
> + snprintf(search_pattern, sizeof(search_pattern) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_ring_page*",
> + pci->domain, pci->bus, pci->device, pci->function);
> +
> + /* Use glob to find files matching the pattern */
> + ret = glob(search_pattern, GLOB_NOSORT, NULL, &glob_result);
> + /* Check for matches before freeing: gl_pathc is not safe to read after globfree() */
> + ret = (ret == 0 && glob_result.gl_pathc > 0);
> + /* Free the memory allocated by glob */
> + globfree(&glob_result);
> + return ret;
> +}
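With the helper moved as suggested above, a caller in another test case
would only need something like this (hypothetical caller; it assumes the
PCI address was already queried the way the deadlock tests do):

    if (is_support_page_queue(AMD_IP_DMA, &pci))
            igt_info("SDMA page queue present, testing gfx + page rings\n");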
> +
> static void*
> write_mem_address(void *data)
> {
> @@ -179,16 +205,19 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
> FILE *fp;
> char cmd[1024];
> char buffer[128];
> - long sched_mask = 0;
> + uint64_t sched_mask = 0, ring_id;
> struct drm_amdgpu_info_hw_ip info;
> - uint32_t ring_id, prio;
> + uint32_t prio;
> char sysfs[125];
> + bool support_page;
>
> r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
> igt_assert_eq(r, 0);
> if (!info.available_rings)
> igt_info("SKIP ... as there's no ring for ip %d\n", ip_type);
>
> + support_page = is_support_page_queue(ip_type, pci);
> +
> if (ip_type == AMD_IP_GFX)
> snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_gfx_sched_mask",
> pci->domain, pci->bus, pci->device, pci->function);
> @@ -215,7 +244,7 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
> igt_info("The scheduling ring only enables one for ip %d\n", ip_type);
> }
>
> - for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
> + for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id++) {
> /* check sched is ready is on the ring. */
> if (!((1 << ring_id) & sched_mask))
> continue;
> @@ -239,9 +268,20 @@ void amdgpu_wait_memory_helper(amdgpu_device_handle device_handle, unsigned int
> }
>
> if (sched_mask > 1) {
> - snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> - 0x1 << ring_id, sysfs);
> - igt_info("Disable other rings, keep only ring: %d enabled, cmd: %s\n", ring_id, cmd);
> + /* If page queues are supported, run with
> + * multiple queues (sdma gfx queue + page queue)
> + */
> + if (support_page) {
> + snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> + 0x3 << ring_id, sysfs);
> + igt_info("Disable other rings, keep rings %lu and %lu enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
> + ring_id++;
> +
> + } else {
> + snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> + 0x1 << ring_id, sysfs);
> + igt_info("Disable other rings, keep only ring %lu enabled, cmd: %s\n", ring_id, cmd);
> + }
> r = system(cmd);
> igt_assert_eq(r, 0);
> }
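One thing to double-check while touching this loop: the unchanged guard
above still shifts a plain int (1 << ring_id), which overflows once
sched_mask ever carries bits above 31. A sketch of the fix, applicable to
all three helpers:

    /* use an unsigned 64-bit shift to match the uint64_t sched_mask */
    if (!((1ULL << ring_id) & sched_mask))
            continue;

The 0x1/0x3 masks handed to snprintf have the same 32-bit limitation.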
> @@ -411,16 +451,18 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
> FILE *fp;
> char cmd[1024];
> char buffer[128];
> - long sched_mask = 0;
> + uint64_t sched_mask = 0, ring_id;
> struct drm_amdgpu_info_hw_ip info;
> - uint32_t ring_id, prio;
> + uint32_t prio;
> char sysfs[125];
> + bool support_page;
>
> r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
> igt_assert_eq(r, 0);
> if (!info.available_rings)
> igt_info("SKIP ... as there's no ring for ip %d\n", ip_type);
>
> + support_page = is_support_page_queue(ip_type, pci);
> if (ip_type == AMD_IP_GFX)
> snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_gfx_sched_mask",
> pci->domain, pci->bus, pci->device, pci->function);
> @@ -447,7 +489,7 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
> igt_info("The scheduling ring only enables one for ip %d\n", ip_type);
> }
>
> - for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
> + for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id++) {
> /* check sched is ready is on the ring. */
> if (!((1 << ring_id) & sched_mask))
> continue;
> @@ -471,9 +513,20 @@ void bad_access_ring_helper(amdgpu_device_handle device_handle, unsigned int cmd
> }
>
> if (sched_mask > 1) {
> - snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> + /* If page queues are supported, run with
> + * multiple queues (sdma gfx queue + page queue)
> + */
> + if (support_page) {
> + snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> + 0x3 << ring_id, sysfs);
> + igt_info("Disable other rings, keep rings %lu and %lu enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
> + ring_id++;
> + } else {
> + snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> 0x1 << ring_id, sysfs);
> - igt_info("Disable other rings, keep only ring: %d enabled, cmd: %s\n", ring_id, cmd);
> + igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
> + }
> +
> r = system(cmd);
> igt_assert_eq(r, 0);
> }
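Not introduced by this patch, but worth noting while we are in here: with
"sudo echo 0x... > file" the redirection is performed by the invoking
shell, not by sudo, so writing the debugfs file only works because the
tests already run as root. Should that ever need to hold under plain sudo,
the usual form is a sketch like this (mask standing for whichever ring
mask is being written):

    snprintf(cmd, sizeof(cmd), "sudo sh -c 'echo 0x%x > %s'", mask, sysfs);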
> @@ -496,16 +549,17 @@ void amdgpu_hang_sdma_ring_helper(amdgpu_device_handle device_handle, uint8_t ha
> FILE *fp;
> char cmd[1024];
> char buffer[128];
> - long sched_mask = 0;
> + uint64_t sched_mask = 0, ring_id;
> struct drm_amdgpu_info_hw_ip info;
> - uint32_t ring_id;
> char sysfs[125];
> + bool support_page;
>
> r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
> igt_assert_eq(r, 0);
> if (!info.available_rings)
> igt_info("SKIP ... as there's no ring for the sdma\n");
>
> + support_page = is_support_page_queue(AMD_IP_DMA, pci);
> snprintf(sysfs, sizeof(sysfs) - 1, "/sys/kernel/debug/dri/%04x:%02x:%02x.%01x/amdgpu_sdma_sched_mask",
> pci->domain, pci->bus, pci->device, pci->function);
> snprintf(cmd, sizeof(cmd) - 1, "sudo cat %s", sysfs);
> @@ -522,14 +576,26 @@ void amdgpu_hang_sdma_ring_helper(amdgpu_device_handle device_handle, uint8_t ha
> } else
> sched_mask = 1;
>
> - for (ring_id = 0; (0x1 << ring_id) <= sched_mask; ring_id++) {
> + for (ring_id = 0; ((uint64_t)0x1 << ring_id) <= sched_mask; ring_id++) {
> /* check sched is ready is on the ring. */
> if (!((1 << ring_id) & sched_mask))
> continue;
>
> if (sched_mask > 1) {
> - snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> + /* If page queues are supported, run with
> + * multiple queues (sdma gfx queue + page queue)
> + */
> + if (support_page) {
> + snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> + 0x3 << ring_id, sysfs);
> + igt_info("Disable other rings, keep rings %lu and %lu enabled, cmd: %s\n", ring_id, ring_id + 1, cmd);
> + ring_id++;
> + } else {
> + snprintf(cmd, sizeof(cmd) - 1, "sudo echo 0x%x > %s",
> 0x1 << ring_id, sysfs);
> + igt_info("Disable other rings, keep only ring: %ld enabled, cmd: %s\n", ring_id, cmd);
> + }
> +
> r = system(cmd);
> igt_assert_eq(r, 0);
> }