[PATCH v4 2/3] drm/amdgpu: VCE avoid memory allocation during IB test

Christian König christian.koenig at amd.com
Mon Sep 13 09:03:46 UTC 2021


On 13.09.21 at 10:42, xinhui pan wrote:
> Allocate the extra msg from the direct IB pool.
>
> Signed-off-by: xinhui pan <xinhui.pan at amd.com>

It would be cleaner if Leo could confirm that 256-byte alignment works as 
well.
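
In other words, if 256-byte alignment is sufficient for the msg, the extra 
page and the manual alignment should not be needed at all, since the IB pool 
suballocations should already come back 256-byte aligned. Untested sketch 
only, not a request to respin:

	/* untested sketch: rely on the alignment the IB pool already
	 * provides instead of over-allocating a second GPU page and
	 * aligning manually
	 */
	memset(&ib_msg, 0, sizeof(ib_msg));
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE,
			  AMDGPU_IB_POOL_DIRECT, &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = ib_msg.gpu_addr;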

But either way, Reviewed-by: Christian König <christian.koenig at amd.com>

Regards,
Christian.

> ---
> Changes from v1:
> - msg is allocated separately
> - msg is aligned to the GPU page boundary
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 27 ++++++++++++-------------
>   1 file changed, 13 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> index e9fdf49d69e8..caa4d3420e00 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> @@ -82,7 +82,6 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
>   
>   static void amdgpu_vce_idle_work_handler(struct work_struct *work);
>   static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> -				     struct amdgpu_bo *bo,
>   				     struct dma_fence **fence);
>   static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>   				      bool direct, struct dma_fence **fence);
> @@ -441,12 +440,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
>    * Open up a stream for HW test
>    */
>   static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
> -				     struct amdgpu_bo *bo,
>   				     struct dma_fence **fence)
>   {
>   	const unsigned ib_size_dw = 1024;
>   	struct amdgpu_job *job;
>   	struct amdgpu_ib *ib;
> +	struct amdgpu_ib ib_msg;
>   	struct dma_fence *f = NULL;
>   	uint64_t addr;
>   	int i, r;
> @@ -456,9 +455,17 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>   	if (r)
>   		return r;
>   
> -	ib = &job->ibs[0];
> +	memset(&ib_msg, 0, sizeof(ib_msg));
> +	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
> +	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
> +			  AMDGPU_IB_POOL_DIRECT,
> +			  &ib_msg);
> +	if (r)
> +		goto err;
>   
> -	addr = amdgpu_bo_gpu_offset(bo);
> +	ib = &job->ibs[0];
> +	/* let addr point to page boundary */
> +	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);
>   
>   	/* stitch together an VCE create msg */
>   	ib->length_dw = 0;
> @@ -498,6 +505,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>   		ib->ptr[i] = 0x0;
>   
>   	r = amdgpu_job_submit_direct(job, ring, &f);
> +	amdgpu_ib_free(ring->adev, &ib_msg, f);
>   	if (r)
>   		goto err;
>   
> @@ -1134,20 +1142,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
>   int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>   {
>   	struct dma_fence *fence = NULL;
> -	struct amdgpu_bo *bo = NULL;
>   	long r;
>   
>   	/* skip vce ring1/2 ib test for now, since it's not reliable */
>   	if (ring != &ring->adev->vce.ring[0])
>   		return 0;
>   
> -	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
> -				      AMDGPU_GEM_DOMAIN_VRAM,
> -				      &bo, NULL, NULL);
> -	if (r)
> -		return r;
> -
> -	r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
> +	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
>   	if (r)
>   		goto error;
>   
> @@ -1163,8 +1164,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>   
>   error:
>   	dma_fence_put(fence);
> -	amdgpu_bo_unreserve(bo);
> -	amdgpu_bo_free_kernel(&bo, NULL, NULL);
>   	return r;
>   }
>   
