[PATCH 2/2] drm/amdgpu: cleanup VCN IB generation

Christian König ckoenig.leichtzumerken at gmail.com
Fri Feb 9 12:32:25 UTC 2018


Leo, could you give this patch a try on Raven and review/ack it?

I've tested patch #1 on Vega10, but I don't have a stable Raven system 
to test this one.
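
For anyone reviewing: the key piece is that amdgpu_bo_create_reserved()
folds the create/reserve/kmap sequence, including the error unwinding,
into a single call, which is what lets most of the boilerplate in the
message helpers below go away. Roughly what the helper does internally
-- this is a simplified sketch from memory, not the verbatim
amdgpu_object.c body (the function name here is made up, and whether it
pins via amdgpu_bo_pin() is my recollection):

	/* Sketch only; the real helper is amdgpu_bo_create_reserved(). */
	static int sketch_bo_create_reserved(struct amdgpu_device *adev,
					     unsigned long size, int align,
					     u32 domain,
					     struct amdgpu_bo **bo_ptr,
					     u64 *gpu_addr, void **cpu_addr)
	{
		int r;

		r = amdgpu_bo_create(adev, size, align, true, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, bo_ptr);
		if (r)
			return r;

		r = amdgpu_bo_reserve(*bo_ptr, false);
		if (r)
			goto error_free;

		/* Pin so the GPU offset stays valid while the IB runs
		 * (assumption, see above). */
		r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
		if (r)
			goto error_unreserve;

		/* Map only when the caller wants a CPU pointer. */
		if (cpu_addr) {
			r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
			if (r)
				goto error_unreserve;
		}

		/* Success: the BO is handed back still reserved. */
		return 0;

	error_unreserve:
		amdgpu_bo_unreserve(*bo_ptr);
	error_free:
		amdgpu_bo_unref(bo_ptr);
		return r;
	}

So the callers get back a BO that is already reserved and CPU mapped
and can hand it straight to amdgpu_vcn_dec_send_msg().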

Thanks,
Christian.

On 07.02.2018 at 20:48, Christian König wrote:
> Start to use amdgpu_bo_create_reserved.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 91 ++++++++-------------------------
>   1 file changed, 21 insertions(+), 70 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index e86d0b2e9b7a..97bdde9a739e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -270,43 +270,28 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
>   	return r;
>   }
>   
> -static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
> -			       bool direct, struct dma_fence **fence)
> +static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
> +				   struct amdgpu_bo *bo, bool direct,
> +				   struct dma_fence **fence)
>   {
> -	struct ttm_operation_ctx ctx = { true, false };
> -	struct ttm_validate_buffer tv;
> -	struct ww_acquire_ctx ticket;
> -	struct list_head head;
> +	struct amdgpu_device *adev = ring->adev;
> +	struct dma_fence *f = NULL;
>   	struct amdgpu_job *job;
>   	struct amdgpu_ib *ib;
> -	struct dma_fence *f = NULL;
> -	struct amdgpu_device *adev = ring->adev;
>   	uint64_t addr;
>   	int i, r;
>   
> -	memset(&tv, 0, sizeof(tv));
> -	tv.bo = &bo->tbo;
> -
> -	INIT_LIST_HEAD(&head);
> -	list_add(&tv.head, &head);
> -
> -	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
> -	if (r)
> -		return r;
> -
> -	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
> -	if (r)
> -		goto err;
> -
>   	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
>   	if (r)
>   		goto err;
>   
>   	ib = &job->ibs[0];
>   	addr = amdgpu_bo_gpu_offset(bo);
> -	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
> +	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0),
> +			     0);
>   	ib->ptr[1] = addr;
> -	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
> +	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1),
> +			     0);
>   	ib->ptr[3] = addr >> 32;
>   	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
>   	ib->ptr[5] = 0;
> @@ -330,11 +315,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
>   			goto err_free;
>   	}
>   
> -	ttm_eu_fence_buffer_objects(&ticket, &head, f);
> +	amdgpu_bo_fence(bo, f, false);
> +	amdgpu_bo_unreserve(bo);
> +	amdgpu_bo_unref(&bo);
>   
>   	if (fence)
>   		*fence = dma_fence_get(f);
> -	amdgpu_bo_unref(&bo);
>   	dma_fence_put(f);
>   
>   	return 0;
> @@ -343,7 +329,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
>   	amdgpu_job_free(job);
>   
>   err:
> -	ttm_eu_backoff_reservation(&ticket, &head);
> +	amdgpu_bo_unreserve(bo);
> +	amdgpu_bo_unref(&bo);
>   	return r;
>   }
>   
> @@ -351,31 +338,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>   			      struct dma_fence **fence)
>   {
>   	struct amdgpu_device *adev = ring->adev;
> -	struct amdgpu_bo *bo;
> +	struct amdgpu_bo *bo = NULL;
>   	uint32_t *msg;
>   	int r, i;
>   
> -	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
> -			     AMDGPU_GEM_DOMAIN_VRAM,
> -			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
> -			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
> -			     NULL, NULL, &bo);
> +	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> +				      AMDGPU_GEM_DOMAIN_VRAM,
> +				      &bo, NULL, (void **)&msg);
>   	if (r)
>   		return r;
>   
> -	r = amdgpu_bo_reserve(bo, false);
> -	if (r) {
> -		amdgpu_bo_unref(&bo);
> -		return r;
> -	}
> -
> -	r = amdgpu_bo_kmap(bo, (void **)&msg);
> -	if (r) {
> -		amdgpu_bo_unreserve(bo);
> -		amdgpu_bo_unref(&bo);
> -		return r;
> -	}
> -
>   	msg[0] = cpu_to_le32(0x00000028);
>   	msg[1] = cpu_to_le32(0x00000038);
>   	msg[2] = cpu_to_le32(0x00000001);
> @@ -393,9 +365,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>   	for (i = 14; i < 1024; ++i)
>   		msg[i] = cpu_to_le32(0x0);
>   
> -	amdgpu_bo_kunmap(bo);
> -	amdgpu_bo_unreserve(bo);
> -
>   	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
>   }
>   
> @@ -407,27 +376,12 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>   	uint32_t *msg;
>   	int r, i;
>   
> -	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
> -			     AMDGPU_GEM_DOMAIN_VRAM,
> -			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
> -			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
> -			     NULL, NULL, &bo);
> +	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
> +				      AMDGPU_GEM_DOMAIN_VRAM,
> +				      &bo, NULL, (void **)&msg);
>   	if (r)
>   		return r;
>   
> -	r = amdgpu_bo_reserve(bo, false);
> -	if (r) {
> -		amdgpu_bo_unref(&bo);
> -		return r;
> -	}
> -
> -	r = amdgpu_bo_kmap(bo, (void **)&msg);
> -	if (r) {
> -		amdgpu_bo_unreserve(bo);
> -		amdgpu_bo_unref(&bo);
> -		return r;
> -	}
> -
>   	msg[0] = cpu_to_le32(0x00000028);
>   	msg[1] = cpu_to_le32(0x00000018);
>   	msg[2] = cpu_to_le32(0x00000000);
> @@ -437,9 +391,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>   	for (i = 6; i < 1024; ++i)
>   		msg[i] = cpu_to_le32(0x0);
>   
> -	amdgpu_bo_kunmap(bo);
> -	amdgpu_bo_unreserve(bo);
> -
>   	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
>   }
>   
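
One more note on the amdgpu_vcn_dec_send_msg() side, in case it isn't
obvious from the diff: the ttm_eu machinery (validation list plus
ww_acquire ticket) is only needed to reserve multiple BOs without
deadlocking against other submitters. With a single BO that arrives
already reserved from amdgpu_bo_create_reserved(), fencing it directly
is enough. The new teardown, annotated -- same calls as in the patch,
the comments are mine:

	/* Attach the job's fence to the BO's reservation object so the
	 * memory can't be moved or freed while the decoder still reads
	 * the message; shared=false makes it the exclusive fence. */
	amdgpu_bo_fence(bo, f, false);

	/* Drop the reservation taken at create time and the last
	 * reference; TTM defers the actual destruction until the
	 * fence has signaled. */
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);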