[PATCH v2 1/1] drm/amdgpu: cleanup vce,vcn,uvd ring selftests
Nirmoy
nirmodas at amd.com
Fri Jan 8 15:42:34 UTC 2021
On 1/8/21 4:25 PM, Christian König wrote:
> On 18.12.20 at 15:40, Nirmoy Das wrote:
>> Use amdgpu_sa_bo instead of amdgpu_bo.
>>
>> v2:
>> * do not initialize bo to get hint from compiler for -Wuninitialized
>> * pass NULL fence to amdgpu_sa_bo_free if fence is undefined.
>
> Found a major bug in this, which is probably the reason why it never
> worked before.
>
> See below.
>
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56 +++++++------------------
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 17 ++++----
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 47 ++++++++++-----------
>> 3 files changed, 45 insertions(+), 75 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> index 8b989670ed66..13450a3df044 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> @@ -1057,7 +1057,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
>> return 0;
>> }
>>
>> -static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>> +static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_sa_bo *bo,
>> bool direct, struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> @@ -1071,19 +1071,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>> unsigned offset_idx = 0;
>> unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
>>
>> - amdgpu_bo_kunmap(bo);
>> - amdgpu_bo_unpin(bo);
>> -
>> - if (!ring->adev->uvd.address_64_bit) {
>> - struct ttm_operation_ctx ctx = { true, false };
>> -
>> - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
>> - amdgpu_uvd_force_into_uvd_segment(bo);
>> - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
>> - if (r)
>> - goto err;
>> - }
>> -
>> r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
>> AMDGPU_IB_POOL_DELAYED, &job);
>> if (r)
>> @@ -1101,7 +1088,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>> data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> ib->ptr[0] = data[0];
>> ib->ptr[1] = addr;
>> ib->ptr[2] = data[1];
>> @@ -1115,33 +1102,17 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>> ib->length_dw = 16;
>>
>> if (direct) {
>> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
>> - true, false,
>> - msecs_to_jiffies(10));
>> - if (r == 0)
>> - r = -ETIMEDOUT;
>> - if (r < 0)
>> - goto err_free;
>> -
>> r = amdgpu_job_submit_direct(job, ring, &f);
>> if (r)
>> goto err_free;
>> } else {
>> - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
>> - AMDGPU_SYNC_ALWAYS,
>> - AMDGPU_FENCE_OWNER_UNDEFINED);
>> - if (r)
>> - goto err_free;
>> -
>> r = amdgpu_job_submit(job, &adev->uvd.entity,
>> AMDGPU_FENCE_OWNER_UNDEFINED, &f);
>> if (r)
>> goto err_free;
>> }
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -1153,8 +1124,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> @@ -1165,16 +1135,17 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo;
>> uint32_t *msg;
>> int r, i;
>>
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 1024, PAGE_SIZE);
>
> This 1024 here is wrong. The size is in bytes and not double words. So
> this needs to be 4096.
>
> This only worked because amdgpu_bo_create_reserved always allocated a
> full page instead of 1024 bytes.
>
> The same problem exists in all the other places.
I wonder why it worked on newer cards.
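Will fix the size in v3, roughly like this for the UVD create msg path (untested sketch; amdgpu_sa_bo_new() takes the size in bytes, not dwords):

        /* Size is in bytes: the UVD test message is 1024 dwords,
         * so ask for 4096 bytes rather than 1024.
         */
        r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
                             &bo, 4096, PAGE_SIZE);
        if (r)
                return r;

        msg = amdgpu_sa_bo_cpu_addr(bo);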
>
> In addition to that, the direct pool should only be used if we submit
> directly, e.g. during a GPU reset.
>
> Otherwise we need to use the normal pool.
I will resend with the above changes.
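For the pool selection I would reuse the direct flag the callers already pass around (e.g. amdgpu_uvd_get_destroy_msg()), something along these lines (untested sketch, mirroring the split amdgpu_job_alloc_with_ib() already makes for the IB itself):

        /* Allocate from the direct pool only when we bypass the scheduler,
         * e.g. during a GPU reset; otherwise use the delayed pool.
         */
        r = amdgpu_sa_bo_new(direct ? &adev->ib_pools[AMDGPU_IB_POOL_DIRECT] :
                             &adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
                             &bo, 4096, PAGE_SIZE);
        if (r)
                return r;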
Thanks,
Nirmoy
>
> Regards,
> Christian.
>
>
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>> /* stitch together an UVD create msg */
>> msg[0] = cpu_to_le32(0x00000de4);
>> msg[1] = cpu_to_le32(0x00000000);
>> @@ -1197,16 +1168,17 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>> bool direct, struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo;
>> uint32_t *msg;
>> int r, i;
>>
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(bo);
>> /* stitch together an UVD destroy msg */
>> msg[0] = cpu_to_le32(0x00000de4);
>> msg[1] = cpu_to_le32(0x00000002);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> index 0d5284b936e4..bce29d6975d3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
>> @@ -81,7 +81,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>>
>> static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence);
>> static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>> bool direct, struct dma_fence **fence);
>> @@ -437,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
>> * Open up a stream for HW test
>> */
>> static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> const unsigned ib_size_dw = 1024;
>> @@ -454,7 +454,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>>
>> ib = &job->ibs[0];
>>
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>>
>> /* stitch together an VCE create msg */
>> ib->length_dw = 0;
>> @@ -1130,16 +1130,16 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
>> int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_sa_bo *bo = NULL;
>> + struct amdgpu_device *adev = ring->adev;
>> long r;
>>
>> /* skip vce ring1/2 ib test for now, since it's not reliable */
>> if (ring != &ring->adev->vce.ring[0])
>> return 0;
>>
>> - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, NULL);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + &bo, 512, PAGE_SIZE);
>> if (r)
>> return r;
>>
>> @@ -1158,8 +1158,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> r = 0;
>>
>> error:
>> + amdgpu_sa_bo_free(adev, &bo, fence);
>> dma_fence_put(fence);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> return r;
>> }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> index 4a77c7424dfc..1e46b2f895ba 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> @@ -488,7 +488,7 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
>> }
>>
>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> @@ -504,7 +504,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> +
>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>> ib->ptr[1] = addr;
>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>> @@ -521,9 +522,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -535,25 +534,27 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_sa_bo **bo)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> +
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>>
>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>> +
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000038);
>> msg[2] = cpu_to_le32(0x00000001);
>> @@ -575,18 +576,19 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>> }
>>
>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_sa_bo **bo)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> + r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT],
>> + bo, 1024, PAGE_SIZE);
>> +
>> if (r)
>> return r;
>> + msg = amdgpu_sa_bo_cpu_addr(*bo);
>>
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000018);
>> @@ -603,7 +605,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_sa_bo *bo;
>> long r;
>>
>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> @@ -633,7 +635,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> }
>>
>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_sa_bo *bo,
>> struct dma_fence **fence)
>> {
>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>> @@ -651,7 +653,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = amdgpu_sa_bo_gpu_addr(bo);
>> ib->length_dw = 0;
>>
>> ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
>> @@ -671,9 +673,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -685,15 +685,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> amdgpu_job_free(job);
>>
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_sa_bo_free(adev, &bo, NULL);
>> return r;
>> }
>>
>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_sa_bo *bo;
>> long r;
>>
>> r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> --
>> 2.29.2
>>
>