[PATCH 4/4] tests/amdgpu: add semaphore across process test
From: Chunming Zhou <David1.Zhou@amd.com>
Date: Thu Aug 18 07:55:59 UTC 2016

Add a test that exercises semaphore sharing across processes: the parent
signals a semaphore on the SDMA ring and exports it, and a forked child
imports the semaphore and orders a GFX submission after the signal.

Change-Id: I6e8c8cfa1a05f51f3c03670baea68ed6da94fa11
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
tests/amdgpu/basic_tests.c | 143 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 143 insertions(+)
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index 02e863a..c7da54d 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -47,6 +47,7 @@ static void amdgpu_command_submission_sdma(void);
static void amdgpu_command_submission_multi_fence(void);
static void amdgpu_userptr_test(void);
static void amdgpu_semaphore_test(void);
+static void amdgpu_semaphore_across_process_test(void);
static void amdgpu_svm_test(void);
static void amdgpu_multi_svm_test(void);
static void amdgpu_va_range_test(void);
@@ -60,6 +61,7 @@ CU_TestInfo basic_tests[] = {
{ "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
{ "Command submission Test (Multi-fence)", amdgpu_command_submission_multi_fence },
{ "SW semaphore Test", amdgpu_semaphore_test },
+ { "SW semaphore across process Test", amdgpu_semaphore_across_process_test },
{ "VA range Test", amdgpu_va_range_test},
{ "SVM Test", amdgpu_svm_test },
{ "SVM Test (multi-GPUs)", amdgpu_multi_svm_test },
@@ -515,6 +517,147 @@ static void amdgpu_command_submission_gfx(void)
amdgpu_command_submission_gfx_shared_ib();
}
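+/*
+ * Test semaphore sharing across processes: the parent signals a semaphore
+ * on the SDMA ring and exports it, then a forked child imports it and
+ * orders a GFX submission after the signal.
+ */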
+static void amdgpu_semaphore_across_process_test(void)
+{
+ amdgpu_context_handle context_handle;
+ amdgpu_semaphore_handle sem;
+ amdgpu_bo_handle ib_result_handle[2];
+ void *ib_result_cpu[2];
+ uint64_t ib_result_mc_address[2];
+ struct amdgpu_cs_request ibs_request[2] = {0};
+ struct amdgpu_cs_ib_info ib_info[2] = {0};
+ struct amdgpu_cs_fence fence_status = {0};
+ uint32_t *ptr;
+ uint32_t expired;
+ uint32_t shared_handle;
+ amdgpu_bo_list_handle bo_list[2];
+ amdgpu_va_handle va_handle[2];
+ int r, pid;
+
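+ /* create the semaphore and a submission context in the parent process */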
+ r = amdgpu_cs_create_semaphore_object(device_handle, &sem);
+ CU_ASSERT_EQUAL(r, 0);
+ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ &ib_result_handle[0], &ib_result_cpu[0],
+ &ib_result_mc_address[0], &va_handle[0]);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_get_bo_list(device_handle, ib_result_handle[0],
+ NULL, &bo_list[0]);
+ CU_ASSERT_EQUAL(r, 0);
+
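+ /* build an IB containing a single SDMA NOP */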
+ ptr = ib_result_cpu[0];
+ ptr[0] = SDMA_NOP;
+ ib_info[0].ib_mc_address = ib_result_mc_address[0];
+ ib_info[0].size = 1;
+
+ ibs_request[0].ip_type = AMDGPU_HW_IP_DMA;
+ ibs_request[0].number_of_ibs = 1;
+ ibs_request[0].ibs = &ib_info[0];
+ ibs_request[0].resources = bo_list[0];
+ ibs_request[0].fence_info.handle = NULL;
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request[0], 1);
+ CU_ASSERT_EQUAL(r, 0);
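+ /* signal the semaphore on the SDMA ring and export it for the child */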
+ r = amdgpu_cs_signal_semaphore(context_handle, AMDGPU_HW_IP_DMA, 0, 0, sem);
+ CU_ASSERT_EQUAL(r, 0);
+ r = amdgpu_cs_export_semaphore(sem, &shared_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ pid = fork();
+ /* child process */
+ if (pid == 0) {
+ amdgpu_device_handle child_device_handle;
+ uint32_t child_major_version;
+ uint32_t child_minor_version;
+ amdgpu_semaphore_handle child_sem;
+ amdgpu_context_handle context_handle1;
+
+ r = amdgpu_device_initialize(drm_amdgpu[0], &child_major_version,
+ &child_minor_version, &child_device_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_bo_alloc_and_map(child_device_handle, 4096, 4096,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ &ib_result_handle[1], &ib_result_cpu[1],
+ &ib_result_mc_address[1], &va_handle[1]);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_get_bo_list(child_device_handle, ib_result_handle[1],
+ NULL, &bo_list[1]);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_cs_import_semaphore(&child_sem, child_device_handle, shared_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_cs_ctx_create(child_device_handle, &context_handle1);
+ CU_ASSERT_EQUAL(r, 0);
+
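+ /* order the child's GFX submission after the parent's SDMA signal */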
+ r = amdgpu_cs_wait_semaphore(context_handle1, AMDGPU_HW_IP_GFX, 0, 0, child_sem);
+ CU_ASSERT_EQUAL(r, 0);
+ ptr = ib_result_cpu[1];
+ ptr[0] = GFX_COMPUTE_NOP;
+ ib_info[1].ib_mc_address = ib_result_mc_address[1];
+ ib_info[1].size = 1;
+
+ ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
+ ibs_request[1].number_of_ibs = 1;
+ ibs_request[1].ibs = &ib_info[1];
+ ibs_request[1].resources = bo_list[1];
+ ibs_request[1].fence_info.handle = NULL;
+
+ r = amdgpu_cs_submit(context_handle1, 0, &ibs_request[1], 1);
+ CU_ASSERT_EQUAL(r, 0);
+
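+ /* the GFX fence can only signal after the semaphore has been signalled */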
+ fence_status.context = context_handle1;
+ fence_status.ip_type = AMDGPU_HW_IP_GFX;
+ fence_status.fence = ibs_request[1].seq_no;
+ r = amdgpu_cs_query_fence_status(&fence_status,
+ 500000000, 0, &expired);
+ CU_ASSERT_EQUAL(r, 0);
+ CU_ASSERT_EQUAL(expired, true);
+
+ r = amdgpu_bo_unmap_and_free(ib_result_handle[1], va_handle[1],
+ ib_result_mc_address[1], 4096);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_bo_list_destroy(bo_list[1]);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_cs_ctx_free(context_handle1);
+ CU_ASSERT_EQUAL(r, 0);
+
+ amdgpu_device_deinitialize(child_device_handle);
+ /* don't let the child fall through into the remaining CUnit tests */
+ exit(0);
+ } else {
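+ /* parent process: release the submission resources and the semaphore */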
+ r = amdgpu_bo_unmap_and_free(ib_result_handle[0], va_handle[0],
+ ib_result_mc_address[0], 4096);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_bo_list_destroy(bo_list[0]);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_cs_ctx_free(context_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_cs_destroy_semaphore(sem);
+ CU_ASSERT_EQUAL(r, 0);
+ }
+}
+
static void amdgpu_semaphore_test(void)
{
amdgpu_context_handle context_handle[2];
--
1.9.1