[PATCH] tests/amd_queue_reset: add expected error codes
Zhang, Jesse(Jie)
Jesse.Zhang at amd.com
Fri Sep 13 02:28:04 UTC 2024
This patch looks good to me.
Reviewed-by: Jesse Zhang <Jesse.Zhang at amd.com>
-----Original Message-----
From: vitaly.prosyak at amd.com <vitaly.prosyak at amd.com>
Sent: Friday, September 13, 2024 7:49 AM
To: igt-dev at lists.freedesktop.org
Cc: Prosyak, Vitaly <Vitaly.Prosyak at amd.com>; Zhang, Jesse(Jie) <Jesse.Zhang at amd.com>; Deucher, Alexander <Alexander.Deucher at amd.com>; Koenig, Christian <Christian.Koenig at amd.com>
Subject: [PATCH] tests/amd_queue_reset: add expected error codes
From: Vitaly Prosyak <vitaly.prosyak at amd.com>
This change centralizes and documents the expected behavior for specific error conditions.
Keeping the codes in one place makes maintenance easier: expected error codes can be updated quickly so they do not block CI pipelines. At this stage we cannot enforce strict error codes, as we are still addressing resilience issues around failures in cs_submit.
Add an expected reset code for each error scenario and validate the return code when the corresponding test runs. The error codes live in a global table, with separate codes declared per IP block (compute, GFX, SDMA).
Cc: Jesse Zhang <Jesse.Zhang at amd.com>
Cc: Alex Deucher <alexander.deucher at amd.com>
Cc: Christian Koenig <christian.koenig at amd.com>
Signed-off-by: Vitaly Prosyak <vitaly.prosyak at amd.com>
---
lib/amdgpu/amd_command_submission.c | 2 ++
lib/amdgpu/amd_dispatch.c | 46 +++++++++++++++++++----------
lib/amdgpu/amd_dispatch.h | 4 ++-
lib/amdgpu/amd_ip_blocks.h | 15 ++++++++++
tests/amdgpu/amd_queue_reset.c | 45 ++++++++++++++++++++--------
5 files changed, 83 insertions(+), 29 deletions(-)
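A note for reviewers: the table entries are consumed per IP block. Below is a minimal sketch (not part of the patch; the helper name expected_reset_code is hypothetical) of the selection logic this patch adds to set_next_test_to_run(), using the enum and struct declared in amd_ip_blocks.h:

static int expected_reset_code(enum amd_ip_block_type ip_bad,
			       const struct reset_err_result *result)
{
	/* the IP block running the bad job decides which expected code applies */
	if (ip_bad == AMD_IP_GFX)
		return result->gfx_reset_result;
	if (ip_bad == AMD_IP_COMPUTE)
		return result->compute_reset_result;
	return result->sdma_reset_result;	/* SDMA and remaining IPs */
}

At run time the codes captured in struct amdgpu_cs_err_codes (err_code_cs_submit, err_code_wait_for_fence) would be compared against this value; the corresponding igt_assert_eq() calls remain commented out for now, until the cs_submit resilience issues are resolved.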
diff --git a/lib/amdgpu/amd_command_submission.c b/lib/amdgpu/amd_command_submission.c
index a0c72fb47..df278ad69 100644
--- a/lib/amdgpu/amd_command_submission.c
+++ b/lib/amdgpu/amd_command_submission.c
@@ -74,6 +74,7 @@ int amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type
/* submit CS */
r = amdgpu_cs_submit(ring_context->context_handle, 0, &ring_context->ibs_request, 1);
+ ring_context->err_codes.err_code_cs_submit = r;
if (expect_failure)
igt_info("amdgpu_cs_submit %d PID %d\n", r, getpid());
else {
@@ -95,6 +96,7 @@ int amdgpu_test_exec_cs_helper(amdgpu_device_handle device, unsigned int ip_type
r = amdgpu_cs_query_fence_status(&fence_status,
AMDGPU_TIMEOUT_INFINITE,
0, &expired);
+ ring_context->err_codes.err_code_wait_for_fence = r;
if (expect_failure) {
igt_info("EXPECT FAILURE amdgpu_cs_query_fence_status %d expired %d PID %d\n", r, expired, getpid());
} else {
diff --git a/lib/amdgpu/amd_dispatch.c b/lib/amdgpu/amd_dispatch.c
index 0de0ce816..5b4698a83 100644
--- a/lib/amdgpu/amd_dispatch.c
+++ b/lib/amdgpu/amd_dispatch.c
@@ -161,10 +161,13 @@ amdgpu_memset_dispatch_test(amdgpu_device_handle device_handle,
int
amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
- uint32_t ip_type, uint32_t ring, uint32_t version,
- enum cmd_error_type hang)
+ amdgpu_context_handle context_handle_param,
+ uint32_t ip_type, uint32_t ring, uint32_t version,
+ enum cmd_error_type hang,
+ struct amdgpu_cs_err_codes *err_codes)
{
- amdgpu_context_handle context_handle;
+ amdgpu_context_handle context_handle_free = NULL;
+ amdgpu_context_handle context_handle_in_use = NULL;
amdgpu_bo_handle bo_src, bo_dst, bo_shader, bo_cmd, resources[4];
volatile unsigned char *ptr_dst;
void *ptr_shader;
@@ -184,8 +187,13 @@ amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
struct amdgpu_cs_fence fence_status = {0};
struct amdgpu_cmd_base *base_cmd = get_cmd_base();
- r = amdgpu_cs_ctx_create(device_handle, &context_handle);
- igt_assert_eq(r, 0);
+ if (context_handle_param == NULL) {
+ r = amdgpu_cs_ctx_create(device_handle, &context_handle_in_use);
+ context_handle_free = context_handle_in_use;
+ igt_assert_eq(r, 0);
+ } else {
+ context_handle_in_use = context_handle_param;
+ }
r = amdgpu_bo_alloc_and_map(device_handle, bo_cmd_size, 4096,
AMDGPU_GEM_DOMAIN_GTT, 0,
@@ -300,19 +308,22 @@ amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
ibs_request.number_of_ibs = 1;
ibs_request.ibs = &ib_info;
ibs_request.fence_info.handle = NULL;
- r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- igt_assert_eq(r, 0);
+ r = amdgpu_cs_submit(context_handle_in_use, 0, &ibs_request, 1);
+ if (err_codes)
+ err_codes->err_code_cs_submit = r;
fence_status.ip_type = ip_type;
fence_status.ip_instance = 0;
fence_status.ring = ring;
- fence_status.context = context_handle;
+ fence_status.context = context_handle_in_use;
fence_status.fence = ibs_request.seq_no;
/* wait for IB accomplished */
r = amdgpu_cs_query_fence_status(&fence_status,
AMDGPU_TIMEOUT_INFINITE,
0, &expired);
+ if (err_codes)
+ err_codes->err_code_wait_for_fence = r;
if (!hang) {
igt_assert_eq(r, 0);
@@ -326,7 +337,7 @@ amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
i++;
}
} else {
- r2 = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
+ r2 = amdgpu_cs_query_reset_state(context_handle_in_use, &hang_state, &hangs);
igt_assert_eq(r2, 0);
}
@@ -336,7 +347,10 @@ amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
amdgpu_bo_unmap_and_free(bo_cmd, va_cmd, mc_address_cmd, bo_cmd_size);
amdgpu_bo_unmap_and_free(bo_shader, va_shader, mc_address_shader,
bo_shader_size);
- amdgpu_cs_ctx_free(context_handle);
+
+ if (context_handle_free)
+ amdgpu_cs_ctx_free(context_handle_free);
+
return r;
}
@@ -538,13 +552,13 @@ amdgpu_dispatch_hang_slow_helper(amdgpu_device_handle device_handle,
return;
}
for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memcpy_dispatch_test(device_handle, ip_type,
- ring_id, version, BACKEND_SE_GC_SHADER_EXEC_SUCCESS);
+ amdgpu_memcpy_dispatch_test(device_handle, NULL, ip_type,
+ ring_id, version, BACKEND_SE_GC_SHADER_EXEC_SUCCESS, NULL);
amdgpu_memcpy_dispatch_hang_slow_test(device_handle, ip_type,
ring_id, version, AMDGPU_CTX_UNKNOWN_RESET);
- amdgpu_memcpy_dispatch_test(device_handle, ip_type, ring_id,
- version, BACKEND_SE_GC_SHADER_EXEC_SUCCESS);
+ amdgpu_memcpy_dispatch_test(device_handle, NULL, ip_type, ring_id,
+ version, BACKEND_SE_GC_SHADER_EXEC_SUCCESS, NULL);
}
}
@@ -570,8 +584,8 @@ void amdgpu_gfx_dispatch_test(amdgpu_device_handle device_handle, uint32_t ip_ty
for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
amdgpu_memset_dispatch_test(device_handle, ip_type, ring_id,
version);
- amdgpu_memcpy_dispatch_test(device_handle, ip_type, ring_id,
- version, hang);
+ amdgpu_memcpy_dispatch_test(device_handle, NULL, ip_type, ring_id,
+ version, hang, NULL);
}
}
diff --git a/lib/amdgpu/amd_dispatch.h b/lib/amdgpu/amd_dispatch.h
index 9aa7a1b78..89c448a1f 100644
--- a/lib/amdgpu/amd_dispatch.h
+++ b/lib/amdgpu/amd_dispatch.h
@@ -31,10 +31,12 @@ void amdgpu_gfx_dispatch_test(amdgpu_device_handle device_handle,
uint32_t ip_type, enum cmd_error_type hang);
int amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
+ amdgpu_context_handle context_handle,
uint32_t ip_type,
uint32_t ring,
uint32_t version,
- enum cmd_error_type hang);
+ enum cmd_error_type hang,
+ struct amdgpu_cs_err_codes *err_codes);
void amdgpu_dispatch_hang_slow_helper(amdgpu_device_handle device_handle,
uint32_t ip_type);
diff --git a/lib/amdgpu/amd_ip_blocks.h b/lib/amdgpu/amd_ip_blocks.h
index 3e729f4c0..161f841cf 100644
--- a/lib/amdgpu/amd_ip_blocks.h
+++ b/lib/amdgpu/amd_ip_blocks.h
@@ -57,15 +57,29 @@ struct asic_id_filter
int chip_id_end;
};
+/* expected reset result for different IPs */
+struct reset_err_result
+{
+ int compute_reset_result;
+ int gfx_reset_result;
+ int sdma_reset_result;
+};
+
struct dynamic_test{
enum cmd_error_type test;
const char *name;
const char *describe;
struct asic_id_filter exclude_filter[_MAX_NUM_ASIC_ID_EXCLUDE_FILTER];
+ struct reset_err_result result;
};
#define for_each_test(t, T) for(typeof(*T) *t = T; t->name; t++)
+/* set during execution */
+struct amdgpu_cs_err_codes {
+ int err_code_cs_submit;
+ int err_code_wait_for_fence;
+};
+
/* aux struct to hold misc parameters for convenience to maintain */
struct amdgpu_ring_context {
@@ -112,6 +126,7 @@ struct amdgpu_ring_context {
struct amdgpu_cs_ib_info ib_info; /* amdgpu_bo_list_create */
struct amdgpu_cs_request ibs_request; /* amdgpu_cs_query_fence_status */
+ struct amdgpu_cs_err_codes err_codes;
};
diff --git a/tests/amdgpu/amd_queue_reset.c b/tests/amdgpu/amd_queue_reset.c
index 177a22b3e..b6fe0e64e 100644
--- a/tests/amdgpu/amd_queue_reset.c
+++ b/tests/amdgpu/amd_queue_reset.c
@@ -44,6 +44,7 @@ struct job_struct {
unsigned int error;
enum amd_ip_block_type ip;
unsigned int ring_id;
+ int reset_err_result;
/* additional data if necessary */
};
@@ -306,7 +307,8 @@ static void wait_for_complete_iteration(struct shmbuf *sh_mem)
static void set_next_test_to_run(struct shmbuf *sh_mem, unsigned int error,
enum amd_ip_block_type ip_good, enum amd_ip_block_type ip_bad,
- unsigned int ring_id_good, unsigned int ring_id_bad)
+ unsigned int ring_id_good, unsigned int ring_id_bad,
+ const struct reset_err_result *result)
{
char error_str[128];
char ip_good_str[64];
@@ -324,8 +326,16 @@ static void set_next_test_to_run(struct shmbuf *sh_mem, unsigned int error,
sh_mem->bad_job.error = error;
sh_mem->bad_job.ip = ip_bad;
sh_mem->bad_job.ring_id = ring_id_bad;
+ if (ip_bad == AMD_IP_GFX)
+ sh_mem->bad_job.reset_err_result = result->gfx_reset_result;
+ else if (ip_bad == AMD_IP_COMPUTE)
+ sh_mem->bad_job.reset_err_result = result->compute_reset_result;
+ else
+ sh_mem->bad_job.reset_err_result = result->sdma_reset_result;
+
sh_mem->good_job.error = CMD_STREAM_EXEC_SUCCESS;
sh_mem->good_job.ip = ip_good;
+ sh_mem->good_job.reset_err_result = 0;
sh_mem->good_job.ring_id = ring_id_good;
sh_mem->sub_test_is_skipped = false;
sh_mem->sub_test_is_existed = true;
@@ -482,7 +492,7 @@ is_sub_test_queue_reset_enable(const struct amdgpu_gpu_info *gpu_info,
static int
amdgpu_write_linear(amdgpu_device_handle device, amdgpu_context_handle context_handle,
const struct amdgpu_ip_block_version *ip_block,
- const struct job_struct *job)
+ const struct job_struct *job, struct amdgpu_cs_err_codes *err_codes)
{
const int pm4_dw = 256;
struct amdgpu_ring_context *ring_context;
@@ -527,6 +537,8 @@ amdgpu_write_linear(amdgpu_device_handle device, amdgpu_context_handle context_h
r = amdgpu_test_exec_cs_helper(device, ip_block->type, ring_context,
expect_failure);
+ err_codes->err_code_cs_submit = ring_context->err_codes.err_code_cs_submit;
+ err_codes->err_code_wait_for_fence = ring_context->err_codes.err_code_wait_for_fence;
amdgpu_bo_unmap_and_free(ring_context->bo, ring_context->va_handle,
ring_context->bo_mc, ring_context->write_length * sizeof(uint32_t));
@@ -646,6 +658,7 @@ run_test_child(amdgpu_device_handle device, amdgpu_context_handle *arr_context,
struct job_struct job;
const struct amdgpu_ip_block_version *ip_block_test = NULL;
+ struct amdgpu_cs_err_codes err_codes;
while (num_of_tests > 0) {
sync_point_enter(sh_mem);
@@ -660,12 +673,15 @@ run_test_child(amdgpu_device_handle device, amdgpu_context_handle *arr_context,
bool_ret = is_dispatch_shader_test(job.error, error_str, &is_dispatch);
igt_assert_eq(bool_ret, 1);
ip_block_test = get_ip_block(device, job.ip);
+ err_codes.err_code_cs_submit = 0;
+ err_codes.err_code_wait_for_fence = 0;
+
if (is_dispatch) {
- ret = amdgpu_memcpy_dispatch_test(device, job.ip, job.ring_id, version,
- job.error);
+ ret = amdgpu_memcpy_dispatch_test(device, arr_context[test_counter], job.ip, job.ring_id, version,
+ job.error, &err_codes);
} else {
ret = amdgpu_write_linear(device, arr_context[test_counter],
- ip_block_test, &job);
+ ip_block_test, &job, &err_codes);
}
num_of_tests--;
@@ -676,6 +692,9 @@ run_test_child(amdgpu_device_handle device, amdgpu_context_handle *arr_context,
break;
sleep(1);
}
+ /* validate expected result */
+ //igt_assert_eq(err_codes.err_code_wait_for_fence, job.reset_err_result);
+
sync_point_exit(sh_mem);
test_counter++;
}
@@ -697,6 +716,7 @@ run_background(amdgpu_device_handle device, struct shmbuf *sh_mem,
const struct amdgpu_ip_block_version *ip_block_test = NULL;
int error_code;
unsigned int flags;
+ struct amdgpu_cs_err_codes err_codes;
r = amdgpu_cs_ctx_create(device, &context_handle);
igt_assert_eq(r, 0);
@@ -713,7 +733,7 @@ run_background(amdgpu_device_handle device, struct shmbuf *sh_mem,
ip_block_test = get_ip_block(device, job.ip);
is_dispatch_shader_test(job.error, error_str, &is_dispatch);
while (1) {
- r = amdgpu_write_linear(device, context_handle, ip_block_test, &job);
+ r = amdgpu_write_linear(device, context_handle, ip_block_test, &job, &err_codes);
if (counter > NUM_ITERATION && counter % NUM_ITERATION == 0)
igt_debug("+++BACKGROUND++ amdgpu_write_linear for %s ring_id %d ret %d counter %d\n", @@ -726,6 +746,7 @@ run_background(amdgpu_device_handle device, struct shmbuf *sh_mem,
}
if (r != -ECANCELED && r != -ETIME && r != -ENODATA)
igt_assert_eq(r, 0);
+ //igt_assert_eq(err_codes.err_code_wait_for_fence, job.reset_err_result);
/*
* TODO we have issue during gpu reset the return code assert we put after we check the
* test is completed otherwise the job is failed due to
@@ -1082,10 +1103,10 @@ igt_main
struct dynamic_test arr_err[] = {
{CMD_STREAM_EXEC_INVALID_PACKET_LENGTH, "CMD_STREAM_EXEC_INVALID_PACKET_LENGTH",
"Stressful-and-multiple-cs-of-bad and good length-operations-using-multiple-processes",
- { { FAMILY_GFX1100, 0x1, 0xFF }, {FAMILY_AI, 0x32, 0xFF }, {FAMILY_AI, 0x3C, 0xFF } } },
+ { { FAMILY_GFX1100, 0x1, 0xFF }, {FAMILY_AI, 0x32, 0xFF }, {FAMILY_AI, 0x3C, 0xFF } }, {-ECANCELED, -ECANCELED, 0 } },
{CMD_STREAM_EXEC_INVALID_OPCODE, "CMD_STREAM_EXEC_INVALID_OPCODE",
"Stressful-and-multiple-cs-of-bad and good opcode-operations-using-multiple-processes",
- { {FAMILY_UNKNOWN, -1, -1 }, {FAMILY_UNKNOWN, -1, -1 }, {FAMILY_UNKNOWN, -1, -1 } } },
+ { {FAMILY_UNKNOWN, -1, -1 }, {FAMILY_UNKNOWN, -1, -1 }, {FAMILY_UNKNOWN, -1, -1 } }, { 0, -ECANCELED, -ECANCELED } },
//TODO not job timeout, debug why for n31.
//{CMD_STREAM_TRANS_BAD_MEM_ADDRESS_BY_SYNC,"CMD_STREAM_TRANS_BAD_MEM_ADDRESS_BY_SYNC",
// "Stressful-and-multiple-cs-of-bad and good mem-sync-operations-using-multiple-processes"},
@@ -1094,16 +1115,16 @@ igt_main
// "Stressful-and-multiple-cs-of-bad and good reg-operations-using-multiple-processes"},
{BACKEND_SE_GC_SHADER_INVALID_PROGRAM_ADDR, "BACKEND_SE_GC_SHADER_INVALID_PROGRAM_ADDR",
"Stressful-and-multiple-cs-of-bad and good shader-operations-using-multiple-processes",
- { {FAMILY_UNKNOWN, 0x1, 0x10 }, {FAMILY_AI, 0x32, 0x3C }, {FAMILY_AI, 0x3C, 0xFF } } },
+ { {FAMILY_UNKNOWN, 0x1, 0x10 }, {FAMILY_AI, 0x32, 0x3C }, {FAMILY_AI, 0x3C, 0xFF } }, { -ECANCELED, -ECANCELED, -ECANCELED } },
//TODO KGQ cannot recover via queue reset; it may need a fw bugfix on navi31
//{BACKEND_SE_GC_SHADER_INVALID_PROGRAM_SETTING,"BACKEND_SE_GC_SHADER_INVALID_PROGRAM_SETTING",
// "Stressful-and-multiple-cs-of-bad and good shader-operations-using-multiple-processes"},
{BACKEND_SE_GC_SHADER_INVALID_USER_DATA, "BACKEND_SE_GC_SHADER_INVALID_USER_DATA",
"Stressful-and-multiple-cs-of-bad and good shader-operations-using-multiple-processes",
- { {FAMILY_UNKNOWN, -1, -1 }, {FAMILY_AI, 0x32, 0x3C }, {FAMILY_AI, 0x3C, 0xFF } } },
+ { {FAMILY_UNKNOWN, -1, -1 }, {FAMILY_AI, 0x32, 0x3C }, {FAMILY_AI, 0x3C, 0xFF } }, { -ECANCELED, -ECANCELED, -ECANCELED } },
{BACKEND_SE_GC_SHADER_INVALID_SHADER, "BACKEND_SE_GC_SHADER_INVALID_SHADER",
"Stressful-and-multiple-cs-of-bad and good shader-operations-using-multiple-processes",
- { {FAMILY_UNKNOWN, 0x1, 0x10 }, {FAMILY_AI, 0x32, 0x3C }, {FAMILY_AI, 0x3C, 0xFF } } },
+ { {FAMILY_UNKNOWN, 0x1, 0x10 }, {FAMILY_AI, 0x32, 0x3C }, {FAMILY_AI, 0x3C, 0xFF } }, { -ECANCELED, -ECANCELED, -ECANCELED } },
{}
};
@@ -1175,7 +1196,7 @@ igt_main
info[i].available_rings, ip_background != ip_tests[i], &ring_id_job_good, &ring_id_job_bad)) {
igt_dynamic_f("amdgpu-%s-ring-good-%d-bad-%d-%s", it->name, ring_id_job_good, ring_id_job_bad,
ip_tests[i] == AMD_IP_COMPUTE ? "COMPUTE":"GFX")
- set_next_test_to_run(sh_mem, it->test, ip_background, ip_tests[i], ring_id_job_good, ring_id_job_bad);
+ set_next_test_to_run(sh_mem, it->test, ip_background, ip_tests[i], ring_id_job_good, ring_id_job_bad, &it->result);
} else {
set_next_test_to_skip(sh_mem);
}
--
2.25.1