[PATCH v4 08/10] drm/amdgpu: map wptr BO into GART

Christian König christian.koenig at amd.com
Tue Apr 25 12:36:56 UTC 2023


On 24.04.23 at 19:38, Shashank Sharma wrote:
> To support oversubscription, the MES firmware expects WPTR BOs
> to be mapped into the GART before their usermode queues are
> submitted. This patch adds a function to do that.
>
> V4: fix the wptr value before mapping lookup (Bas, Christian).
>
> Cc: Alex Deucher <alexander.deucher at amd.com>
> Cc: Christian Koenig <christian.koenig at amd.com>
> Signed-off-by: Arvind Yadav <arvind.yadav at amd.com>
> Signed-off-by: Shashank Sharma <shashank.sharma at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 90 +++++++++++++++++++
>   drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c        |  1 +
>   .../gpu/drm/amd/include/amdgpu_userqueue.h    |  1 +
>   3 files changed, 92 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> index e95fb35b0cb5..385cd51b6c96 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> @@ -44,6 +44,89 @@ amdgpu_userqueue_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
>   	return idr_find(&uq_mgr->userq_idr, qid);
>   }
>   
> +static int
> +amdgpu_userqueue_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
> +{
> +	int ret;
> +
> +	ret = amdgpu_bo_reserve(bo, true);
> +	if (ret) {
> +		DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
> +		goto err_reserve_bo_failed;
> +	}
> +
> +	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
> +	if (ret) {
> +		DRM_ERROR("Failed to pin bo. ret %d\n", ret);
> +		goto err_pin_bo_failed;
> +	}
> +
> +	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
> +	if (ret) {
> +		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
> +		goto err_map_bo_gart_failed;
> +	}

Either pinning *or* allocating GART, but not both!

I think calling amdgpu_ttm_alloc_gart() is the right thing to do here.
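
For illustration, a minimal and untested sketch of the helper with the pin
dropped, keeping the name and signature from the patch; it assumes the BO's
GTT residency is guaranteed elsewhere:

static int
amdgpu_userqueue_map_gtt_bo_to_gart(struct amdgpu_device *adev,
				    struct amdgpu_bo *bo)
{
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		DRM_ERROR("Failed to reserve wptr bo, ret %d\n", ret);
		return ret;
	}

	/* Only bind the BO's GTT pages into the GART window; no pin is
	 * taken here, so residency has to be guaranteed by the caller.
	 */
	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret)
		DRM_ERROR("Failed to bind wptr bo to GART, ret %d\n", ret);

	amdgpu_bo_unreserve(bo);
	return ret;
}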

> +
> +	amdgpu_bo_unreserve(bo);
> +	bo = amdgpu_bo_ref(bo);
> +
> +	return 0;
> +
> +err_map_bo_gart_failed:
> +	amdgpu_bo_unpin(bo);
> +err_pin_bo_failed:
> +	amdgpu_bo_unreserve(bo);
> +err_reserve_bo_failed:
> +
> +	return ret;
> +}
> +
> +
> +static int
> +amdgpu_userqueue_create_wptr_mapping(struct amdgpu_device *adev,
> +				     struct drm_file *filp,
> +				     struct amdgpu_usermode_queue *queue)
> +{
> +	struct amdgpu_bo_va_mapping *wptr_mapping;
> +	struct amdgpu_vm *wptr_vm;
> +	struct amdgpu_bo *wptr_bo = NULL;
> +	uint64_t wptr = queue->userq_prop.wptr_gpu_addr;
> +	int ret;
> +
> +	wptr_vm = queue->vm;
> +	ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);

All the handling must be done with the VM and all resource locks held.

So this should be something the caller of the function does.
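
Roughly something like this in amdgpu_userqueue_create_gfx(), just as an
untested sketch, assuming the wptr BO shares the VM root reservation
(otherwise it would need to be reserved here as well):

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		goto free_queue;

	/* Lookup and GART binding run entirely under the VM reservation;
	 * the helper itself no longer reserves/unreserves anything.
	 */
	r = amdgpu_userqueue_create_wptr_mapping(uq_mgr->adev, filp, queue);
	amdgpu_bo_unreserve(queue->vm->root.bo);
	if (r) {
		DRM_ERROR("Failed to map WPTR (0x%llx) for userqueue\n",
			  queue->userq_prop.wptr_gpu_addr);
		goto free_queue;
	}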

Regards,
Christian.

> +	if (ret)
> +		goto err_wptr_map_gart;
> +
> +	wptr &= AMDGPU_GMC_HOLE_MASK;
> +	wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
> +	amdgpu_bo_unreserve(wptr_vm->root.bo);
> +	if (!wptr_mapping) {
> +		DRM_ERROR("Failed to lookup wptr bo\n");
> +		ret = -EINVAL;
> +		goto err_wptr_map_gart;
> +	}
> +
> +	wptr_bo = wptr_mapping->bo_va->base.bo;
> +	if (wptr_bo->tbo.base.size > PAGE_SIZE) {
> +		DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
> +		ret = -EINVAL;
> +		goto err_wptr_map_gart;
> +	}
> +
> +	ret = amdgpu_userqueue_map_gtt_bo_to_gart(adev, wptr_bo);
> +	if (ret) {
> +		DRM_ERROR("Failed to map wptr bo to GART\n");
> +		goto err_wptr_map_gart;
> +	}
> +
> +	queue->wptr_mc_addr = wptr_bo->tbo.resource->start << PAGE_SHIFT;
> +	return 0;
> +
> +err_wptr_map_gart:
> +	return ret;
> +}
> +
>   static int amdgpu_userqueue_create_gfx(struct drm_file *filp, union drm_amdgpu_userq *args)
>   {
>   	struct amdgpu_usermode_queue *queue;
> @@ -81,6 +164,13 @@ static int amdgpu_userqueue_create_gfx(struct drm_file *filp, union drm_amdgpu_u
>   		goto free_queue;
>   	}
>   
> +	r = amdgpu_userqueue_create_wptr_mapping(uq_mgr->adev, filp, queue);
> +	if (r) {
> +		DRM_ERROR("Failed to map WPTR (0x%llx) for userqueue\n",
> +			   queue->userq_prop.wptr_gpu_addr);
> +		goto free_queue;
> +	}
> +
>   	if (uq_mgr->userq_funcs[queue->queue_type]->mqd_create) {
>   		r = uq_mgr->userq_funcs[queue->queue_type]->mqd_create(uq_mgr, queue);
>   		if (r) {
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> index 7a45d68091ec..6eeae0206d8a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
> @@ -6439,6 +6439,7 @@ static int gfx_v11_userq_map(struct amdgpu_userq_mgr *uq_mgr,
>   	queue_input.queue_size = queue->userq_prop.queue_size >> 2;
>   	queue_input.doorbell_offset = queue->userq_prop.doorbell_index;
>   	queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
> +	queue_input.wptr_mc_addr = queue->wptr_mc_addr;
>   
>   	amdgpu_mes_lock(&adev->mes);
>   	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
> diff --git a/drivers/gpu/drm/amd/include/amdgpu_userqueue.h b/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
> index 11e8ad649f6e..0001ecd710a7 100644
> --- a/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
> +++ b/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
> @@ -38,6 +38,7 @@ struct amdgpu_usermode_queue {
>   	int			queue_id;
>   	int			queue_type;
>   	uint64_t		doorbell_handle;
> +	uint64_t		wptr_mc_addr;
>   	uint64_t		proc_ctx_gpu_addr;
>   	uint64_t		gang_ctx_gpu_addr;
>   	uint64_t		gds_ctx_gpu_addr;


