[PATCH v3 8/9] drm/amdgpu: map wptr BO into GART

Christian König christian.koenig at amd.com
Tue Apr 11 09:29:04 UTC 2023


On 10.04.23 02:00, Bas Nieuwenhuizen wrote:
> On Wed, Mar 29, 2023 at 6:05 PM Shashank Sharma <shashank.sharma at amd.com> wrote:
>> From: Arvind Yadav <arvind.yadav at amd.com>
>>
>> To support oversubscription, MES expects WPTR BOs to be mapped
>> to GART, before they are submitted to usermode queues.
>>
>> Cc: Alex Deucher <alexander.deucher at amd.com>
>> Cc: Christian Koenig <christian.koenig at amd.com>
>> Cc: Shashank Sharma <shashank.sharma at amd.com>
>> Signed-off-by: Arvind Yadav <arvind.yadav at amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 89 +++++++++++++++++++
>>   .../drm/amd/amdgpu/amdgpu_userqueue_gfx_v11.c |  1 +
>>   .../gpu/drm/amd/include/amdgpu_userqueue.h    |  1 +
>>   3 files changed, 91 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
>> index 5672efcbcffc..7409a4ae55da 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
>> @@ -43,6 +43,89 @@ amdgpu_userqueue_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
>>       return idr_find(&uq_mgr->userq_idr, qid);
>>   }
>>
>> +static int
>> +amdgpu_userqueue_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
>> +{
>> +    int ret;
>> +
>> +    ret = amdgpu_bo_reserve(bo, true);
>> +    if (ret) {
>> +        DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
>> +        goto err_reserve_bo_failed;
>> +    }
>> +
>> +    ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
>> +    if (ret) {
>> +        DRM_ERROR("Failed to pin bo. ret %d\n", ret);
>> +        goto err_pin_bo_failed;
>> +    }
>> +
>> +    ret = amdgpu_ttm_alloc_gart(&bo->tbo);
>> +    if (ret) {
>> +        DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
>> +        goto err_map_bo_gart_failed;
>> +    }
>> +
>> +
>> +    amdgpu_bo_unreserve(bo);
>> +    bo = amdgpu_bo_ref(bo);
>> +
>> +    return 0;
>> +
>> +err_map_bo_gart_failed:
>> +    amdgpu_bo_unpin(bo);
>> +err_pin_bo_failed:
>> +    amdgpu_bo_unreserve(bo);
>> +err_reserve_bo_failed:
>> +
>> +    return ret;
>> +}
>> +
>> +
>> +static int
>> +amdgpu_userqueue_create_wptr_mapping(struct amdgpu_device *adev,
>> +                                    struct drm_file *filp,
>> +                                    struct amdgpu_usermode_queue *queue)
>> +{
>> +    struct amdgpu_bo_va_mapping *wptr_mapping;
>> +    struct amdgpu_vm *wptr_vm;
>> +    struct amdgpu_bo *wptr_bo = NULL;
>> +    uint64_t wptr = queue->userq_prop.wptr_gpu_addr;
>> +    int ret;
>> +
>> +    wptr_vm = queue->vm;
>> +    ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
>> +    if (ret)
>> +        goto err_wptr_map_gart;
>> +
>> +    wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
>> +    amdgpu_bo_unreserve(wptr_vm->root.bo);
>> +    if (!wptr_mapping) {
>> +        DRM_ERROR("Failed to lookup wptr bo\n");
>> +        ret = -EINVAL;
>> +        goto err_wptr_map_gart;
>> +    }
> This triggers for wptr BOs mapped to the high half of address space,
> may need some mangling wrt the top bits?

Yeah, correct. Shashank, this needs to apply the hole mask before looking 
up the address.
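
E.g. something along these lines (untested sketch; assuming 
AMDGPU_GMC_HOLE_MASK from amdgpu_gmc.h is the right mask for stripping 
the high-half bits before the lookup):

    /* Canonicalize the user supplied wptr before looking up the VA mapping */
    wptr &= AMDGPU_GMC_HOLE_MASK;
    wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);

That way wptr BOs mapped in the upper half of the address space resolve 
to the same mapping as their lower-half alias.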

Regards,
Christian.

>
>> +
>> +    wptr_bo = wptr_mapping->bo_va->base.bo;
>> +    if (wptr_bo->tbo.base.size > PAGE_SIZE) {
>> +        DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
>> +        ret = -EINVAL;
>> +        goto err_wptr_map_gart;
>> +    }
>> +
>> +    ret = amdgpu_userqueue_map_gtt_bo_to_gart(adev, wptr_bo);
>> +    if (ret) {
>> +        DRM_ERROR("Failed to map wptr bo to GART\n");
>> +        goto err_wptr_map_gart;
>> +    }
>> +
>> +    queue->wptr_mc_addr = wptr_bo->tbo.resource->start << PAGE_SHIFT;
>> +    return 0;
>> +
>> +err_wptr_map_gart:
>> +    return ret;
>> +}
>> +
>>   static int amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
>>   {
>>       struct amdgpu_usermode_queue *queue;
>> @@ -82,6 +165,12 @@ static int amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq
>>           goto free_queue;
>>       }
>>
>> +    r = amdgpu_userqueue_create_wptr_mapping(uq_mgr->adev, filp, queue);
>> +    if (r) {
>> +        DRM_ERROR("Failed to map WPTR (0x%llx) for userqueue\n", queue->userq_prop.wptr_gpu_addr);
>> +        goto free_queue;
>> +    }
>> +
>>       r = uq_mgr->userq_funcs[queue->queue_type]->mqd_create(uq_mgr, queue);
>>       if (r) {
>>           DRM_ERROR("Failed to create/map userqueue MQD\n");
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_gfx_v11.c
>> index 1627641a4a4e..274e78826334 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_gfx_v11.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_gfx_v11.c
>> @@ -58,6 +58,7 @@ amdgpu_userq_gfx_v11_map(struct amdgpu_userq_mgr *uq_mgr,
>>       queue_input.queue_size = queue->userq_prop.queue_size >> 2;
>>       queue_input.doorbell_offset = queue->userq_prop.doorbell_index;
>>       queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
>> +    queue_input.wptr_mc_addr = queue->wptr_mc_addr;
>>
>>       amdgpu_mes_lock(&adev->mes);
>>       r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
>> diff --git a/drivers/gpu/drm/amd/include/amdgpu_userqueue.h b/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
>> index 8b62ef77cd26..eaab7cf5fff6 100644
>> --- a/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
>> +++ b/drivers/gpu/drm/amd/include/amdgpu_userqueue.h
>> @@ -38,6 +38,7 @@ struct amdgpu_usermode_queue {
>>          int queue_type;
>>          uint64_t flags;
>>          uint64_t doorbell_handle;
>> +       uint64_t wptr_mc_addr;
>>          uint64_t proc_ctx_gpu_addr;
>>          uint64_t gang_ctx_gpu_addr;
>>          uint64_t gds_ctx_gpu_addr;
>> --
>> 2.40.0
>>


