[PATCH 1/2] amdgpu: create only one IB for "all compute queues" tests
Christian König
deathsimple at vodafone.de
Wed Mar 16 12:56:52 UTC 2016
Can somebody with commit access push those two patches after the review?
I don't have write permission for libdrm.
Thanks in advance,
Christian.
Am 16.03.2016 um 13:54 schrieb Christian König:
> From: Christian König <christian.koenig at amd.com>
>
> It's simpler and allows us to test VMID sharing
> between the compute queues as well.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
> tests/amdgpu/basic_tests.c | 40 ++++++++++++++++++++--------------------
> 1 file changed, 20 insertions(+), 20 deletions(-)
>
> diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
> index 4ef6014..d2086ce 100644
> --- a/tests/amdgpu/basic_tests.c
> +++ b/tests/amdgpu/basic_tests.c
> @@ -620,25 +620,25 @@ static void amdgpu_command_submission_compute(void)
> r = amdgpu_cs_ctx_create(device_handle, &context_handle);
> CU_ASSERT_EQUAL(r, 0);
>
> - for (instance = 0; instance < 8; instance++) {
> - r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
> - AMDGPU_GEM_DOMAIN_GTT, 0,
> - &ib_result_handle, &ib_result_cpu,
> - &ib_result_mc_address, &va_handle);
> - CU_ASSERT_EQUAL(r, 0);
> + r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
> + AMDGPU_GEM_DOMAIN_GTT, 0,
> + &ib_result_handle, &ib_result_cpu,
> + &ib_result_mc_address, &va_handle);
> + CU_ASSERT_EQUAL(r, 0);
>
> - r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
> + r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
> &bo_list);
> - CU_ASSERT_EQUAL(r, 0);
> + CU_ASSERT_EQUAL(r, 0);
>
> - ptr = ib_result_cpu;
> - for (i = 0; i < 16; ++i)
> - ptr[i] = 0xffff1000;
> + ptr = ib_result_cpu;
> + for (i = 0; i < 16; ++i)
> + ptr[i] = 0xffff1000;
>
> - memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
> - ib_info.ib_mc_address = ib_result_mc_address;
> - ib_info.size = 16;
> + memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
> + ib_info.ib_mc_address = ib_result_mc_address;
> + ib_info.size = 16;
>
> + for (instance = 0; instance < 8; instance++) {
> memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
> ibs_request.ip_type = AMDGPU_HW_IP_COMPUTE;
> ibs_request.ring = instance;
> @@ -660,14 +660,14 @@ static void amdgpu_command_submission_compute(void)
> AMDGPU_TIMEOUT_INFINITE,
> 0, &expired);
> CU_ASSERT_EQUAL(r, 0);
> + }
>
> - r = amdgpu_bo_list_destroy(bo_list);
> - CU_ASSERT_EQUAL(r, 0);
> + r = amdgpu_bo_list_destroy(bo_list);
> + CU_ASSERT_EQUAL(r, 0);
>
> - r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
> - ib_result_mc_address, 4096);
> - CU_ASSERT_EQUAL(r, 0);
> - }
> + r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
> + ib_result_mc_address, 4096);
> + CU_ASSERT_EQUAL(r, 0);
>
> r = amdgpu_cs_ctx_free(context_handle);
> CU_ASSERT_EQUAL(r, 0);
More information about the dri-devel
mailing list