[PATCH v2 3/5] drm/amdgpu: switch to amdgpu_bo_vm for vm code
Christian König
christian.koenig at amd.com
Thu May 27 09:45:26 UTC 2021
On 26.05.21 at 19:32, Nirmoy wrote:
> Hi Christian,
>
> On 5/26/21 4:58 PM, Christian König wrote:
>> On 26.05.21 at 15:06, Nirmoy Das wrote:
>>> The amdgpu_bo_vm subclass is intended for PT/PD BOs, which are also
>>> shadowed, so switch the PT/PD BOs over to amdgpu_bo_vm.
>>>
>>> v2: squash three related patches.
>>>
>>> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
>>> ---
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      | 90 +++++++++++++--------
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 16 ++--
>>> 2 files changed, 67 insertions(+), 39 deletions(-)
>>>
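For reference, the amdgpu_bo_vm subclass and its cast helper, introduced earlier in this series, look roughly like the sketch below (the exact definition lives in amdgpu_object.h; the field layout shown here is an assumption):

struct amdgpu_bo_vm {
	struct amdgpu_bo	bo;		/* embedded base BO, must stay first so the cast works */
	struct amdgpu_bo	*shadow;	/* GTT shadow copy of the PT/PD contents */
};

static inline struct amdgpu_bo_vm *to_amdgpu_bo_vm(struct amdgpu_bo *abo)
{
	/* only valid for BOs created with bp.bo_ptr_size == sizeof(struct amdgpu_bo_vm) */
	return container_of(abo, struct amdgpu_bo_vm, bo);
}
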
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> index 6bc7566cc193..80d50e6d75f9 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> @@ -658,9 +658,9 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
>>>
>>> ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
>>> &vm->lru_bulk_move);
>>> - if (bo->shadow)
>>> - ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
>>> - &bo->shadow->tbo.mem,
>>> + if (bo->tbo.type == ttm_bo_type_kernel)
>>> + ttm_bo_move_to_lru_tail(&to_amdgpu_bo_vm(bo)->shadow->tbo,
>>> + &to_amdgpu_bo_vm(bo)->shadow->tbo.mem,
>>
>> Maybe use a local variable for the shadow BO here.
>>
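A minimal sketch of that local-variable cleanup, using only names already present in the hunk above:

	if (bo->tbo.type == ttm_bo_type_kernel) {
		struct amdgpu_bo *shadow = to_amdgpu_bo_vm(bo)->shadow;

		ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
					&vm->lru_bulk_move);
	}
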
>>> &vm->lru_bulk_move);
>>> }
>>> spin_unlock(&adev->mman.bdev.lru_lock);
>>> @@ -696,8 +696,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>> r = validate(param, bo);
>>> if (r)
>>> return r;
>>> - if (bo->shadow) {
>>> - r = validate(param, bo->shadow);
>>> + if (bo->tbo.type == ttm_bo_type_kernel) {
>>> + r = validate(param, to_amdgpu_bo_vm(bo)->shadow);
>>> if (r)
>>> return r;
>>> }
>>> @@ -793,8 +793,9 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
>>> if (r)
>>> return r;
>>>
>>> - if (bo->shadow) {
>>> - r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
>>> + if (bo->tbo.type == ttm_bo_type_kernel) {
>>> + r = ttm_bo_validate(&to_amdgpu_bo_vm(bo)->shadow->tbo,
>>> + &to_amdgpu_bo_vm(bo)->shadow->placement,
>>
>> Same here.
>>
>>> &ctx);
>>> if (r)
>>> return r;
>>> @@ -863,14 +864,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
>>> * @vm: requesting vm
>>> * @level: the page table level
>>> * @immediate: use an immediate update
>>> - * @bo: pointer to the buffer object pointer
>>> + * @vmbo: pointer to the buffer object pointer
>>> */
>>> static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
>>> struct amdgpu_vm *vm,
>>> int level, bool immediate,
>>> - struct amdgpu_bo **bo)
>>> + struct amdgpu_bo_vm **vmbo)
>>> {
>>> struct amdgpu_bo_param bp;
>>> + struct amdgpu_bo *bo;
>>> + struct amdgpu_bo *shadow_bo;
>>> + struct dma_resv *resv;
>>> int r;
>>>
>>> memset(&bp, 0, sizeof(bp));
>>> @@ -881,7 +885,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
>>> bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
>>> bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
>>> AMDGPU_GEM_CREATE_CPU_GTT_USWC;
>>> - bp.bo_ptr_size = sizeof(struct amdgpu_bo);
>>> + bp.bo_ptr_size = sizeof(struct amdgpu_bo_vm);
>>> if (vm->use_cpu_for_update)
>>> bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
>>>
>>> @@ -890,26 +894,43 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
>>> if (vm->root.base.bo)
>>> bp.resv = vm->root.base.bo->tbo.base.resv;
>>>
>>> - r = amdgpu_bo_create(adev, &bp, bo);
>>> + r = amdgpu_bo_create_vm(adev, &bp, vmbo);
>>> if (r)
>>> return r;
>>
>>> + bo = &(*vmbo)->bo;
>>> if (vm->is_compute_context && (adev->flags & AMD_IS_APU))
>>> return 0;
>>>
>>> if (!bp.resv)
>>> - WARN_ON(dma_resv_lock((*bo)->tbo.base.resv,
>>> + WARN_ON(dma_resv_lock(bo->tbo.base.resv,
>>> NULL));
>>> - r = amdgpu_bo_create_shadow(adev, bp.size, *bo);
>>> + resv = bp.resv;
>>
>> Maybe shuffle that code around a bit, then you only need the resv
>> variable and no longer need the bo variable.
>
>
> I can remove the shadow_bo variable but I need the bo for
> amdgpu_bo_unref(), as this takes amdgpu_bo ** as argument.
Fine with me as well, just try to clean up the code a bit.
Regards,
Christian.
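
To illustrate Nirmoy's point above: amdgpu_bo_unref() takes an amdgpu_bo ** only so it can clear the caller's pointer after dropping the reference, so passing the address of a local alias into the embedded base BO is fine. A sketch of the error path as it stands in the patch:

	struct amdgpu_bo *bo = &(*vmbo)->bo;	/* local alias to the embedded base BO */

	r = amdgpu_bo_create(adev, &bp, &shadow_bo);
	if (!resv)
		dma_resv_unlock(bo->tbo.base.resv);

	if (r) {
		/* frees the PT/PD BO created above; only the local pointer gets NULLed */
		amdgpu_bo_unref(&bo);
		return r;
	}
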
>
>
> Regards,
>
> Nirmoy
>
>
>>
>>> + memset(&bp, 0, sizeof(bp));
>>> + bp.size = amdgpu_vm_bo_size(adev, level);
>>> + bp.domain = AMDGPU_GEM_DOMAIN_GTT;
>>> + bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
>>> + bp.type = ttm_bo_type_kernel;
>>> + bp.resv = bo->tbo.base.resv;
>>> + bp.bo_ptr_size = sizeof(struct amdgpu_bo);
>>>
>>> - if (!bp.resv)
>>> - dma_resv_unlock((*bo)->tbo.base.resv);
>>> + r = amdgpu_bo_create(adev, &bp, &shadow_bo);
>>
>>> +
>>> +
>>
>> Remove the two empty lines here
>>
>>> + if (!resv)
>>> + dma_resv_unlock(bo->tbo.base.resv);
>>>
>>> if (r) {
>>> - amdgpu_bo_unref(bo);
>>> + amdgpu_bo_unref(&bo);
>>> return r;
>>> }
>>>
>>> + shadow_bo->parent = amdgpu_bo_ref(bo);
>>> + mutex_lock(&adev->shadow_list_lock);
>>> + list_add_tail(&shadow_bo->shadow_list, &adev->shadow_list);
>>> + mutex_unlock(&adev->shadow_list_lock);
>>> + (*vmbo)->shadow = shadow_bo;
>>
>> Ok, we should either move the shadow_list into the vm_mgr structure
>> or keep that in the object code.
>>
>> I think I prefer the latter, something like
>> amdgpu_bo_add_to_shadow_list().
>>
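A sketch of what such a helper might look like (the name is taken from the comment above; the exact signature and its placement in amdgpu_object.c are assumptions):

void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *shadow_bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow_bo->tbo.bdev);

	mutex_lock(&adev->shadow_list_lock);
	list_add_tail(&shadow_bo->shadow_list, &adev->shadow_list);
	mutex_unlock(&adev->shadow_list_lock);
}

The vm code would then call this on shadow_bo instead of touching shadow_list_lock directly.
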
>>> +
>>> return 0;
>>> }
>>>
>>> @@ -933,7 +954,8 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>>> bool immediate)
>>> {
>>> struct amdgpu_vm_pt *entry = cursor->entry;
>>> - struct amdgpu_bo *pt;
>>> + struct amdgpu_bo *pt_bo;
>>> + struct amdgpu_bo_vm *pt;
>>> int r;
>>>
>>> if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
>>> @@ -957,10 +979,11 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>>> /* Keep a reference to the root directory to avoid
>>> * freeing them up in the wrong order.
>>> */
>>> - pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
>>> - amdgpu_vm_bo_base_init(&entry->base, vm, pt);
>>> + pt_bo = &pt->bo;
>>> + pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
>>> + amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
>>>
>>> - r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
>>> + r = amdgpu_vm_clear_bo(adev, vm, pt_bo, immediate);
>>> if (r)
>>> goto error_free_pt;
>>>
>>> @@ -968,7 +991,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>>>
>>> error_free_pt:
>>> amdgpu_bo_unref(&pt->shadow);
>>> - amdgpu_bo_unref(&pt);
>>> + amdgpu_bo_unref(&pt_bo);
>>> return r;
>>> }
>>>
>>> @@ -982,7 +1005,8 @@ static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
>>> if (entry->base.bo) {
>>> entry->base.bo->vm_bo = NULL;
>>> list_del(&entry->base.vm_status);
>>> - amdgpu_bo_unref(&entry->base.bo->shadow);
>>> + if (entry->base.bo->tbo.type == ttm_bo_type_kernel)
>>
>> That should always be true, otherwise we have a rather big bug.
>>
>> So no need to check that here.
>>
>>> + amdgpu_bo_unref(&to_amdgpu_bo_vm(entry->base.bo)->shadow);
>>> amdgpu_bo_unref(&entry->base.bo);
>>> }
>>> kvfree(entry->entries);
>>> @@ -2674,7 +2698,8 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
>>> struct amdgpu_vm_bo_base *bo_base;
>>>
>>> /* shadow bo doesn't have bo base, its validation needs its parent */
>>> - if (bo->parent && bo->parent->shadow == bo)
>>> + if (bo->parent && bo->tbo.type == ttm_bo_type_kernel &&
>>> + to_amdgpu_bo_vm(bo->parent)->shadow == bo)
>>> bo = bo->parent;
>>>
>>> for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
>>> @@ -2843,7 +2868,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
>>> */
>>> int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
>>> {
>>> - struct amdgpu_bo *root;
>>> + struct amdgpu_bo *root_bo;
>>> + struct amdgpu_bo_vm *root;
>>> int r, i;
>>>
>>> vm->va = RB_ROOT_CACHED;
>>> @@ -2897,18 +2923,18 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
>>> false, &root);
>>> if (r)
>>> goto error_free_delayed;
>>> -
>>> - r = amdgpu_bo_reserve(root, true);
>>> + root_bo = &root->bo;
>>> + r = amdgpu_bo_reserve(root_bo, true);
>>> if (r)
>>> goto error_free_root;
>>>
>>> - r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
>>> + r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
>>> if (r)
>>> goto error_unreserve;
>>>
>>> - amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
>>> + amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
>>>
>>> - r = amdgpu_vm_clear_bo(adev, vm, root, false);
>>> + r = amdgpu_vm_clear_bo(adev, vm, root_bo, false);
>>> if (r)
>>> goto error_unreserve;
>>>
>>> @@ -2935,8 +2961,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
>>> amdgpu_bo_unreserve(vm->root.base.bo);
>>>
>>> error_free_root:
>>> - amdgpu_bo_unref(&vm->root.base.bo->shadow);
>>> - amdgpu_bo_unref(&vm->root.base.bo);
>>> + amdgpu_bo_unref(&root->shadow);
>>> + amdgpu_bo_unref(&root_bo);
>>> vm->root.base.bo = NULL;
>>>
>>> error_free_delayed:
>>> @@ -3078,7 +3104,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>> }
>>>
>>> /* Free the shadow bo for compute VM */
>>> - amdgpu_bo_unref(&vm->root.base.bo->shadow);
>>> + amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
>>>
>>> if (pasid)
>>> vm->pasid = pasid;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
>>> index a83a646759c5..3d9cff0c9dda 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
>>> @@ -41,8 +41,8 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
>>> if (r)
>>> return r;
>>>
>>> - if (table->shadow)
>>> - r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
>>> + if (table->tbo.type == ttm_bo_type_kernel)
>>
>>
>> Again that check should be unnecessary.
>>> + r = amdgpu_ttm_alloc_gart(&to_amdgpu_bo_vm(table)->shadow->tbo);
>>>
>>> return r;
>>> }
>>> @@ -238,8 +238,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
>>>
>>> if (!p->pages_addr) {
>>> /* set page commands needed */
>>> - if (bo->shadow)
>>> - amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
>>> + if (bo->tbo.type == ttm_bo_type_kernel)
>>
>> Same here.
>>
>>> + amdgpu_vm_sdma_set_ptes(p, to_amdgpu_bo_vm(bo)->shadow,
>>> + pe, addr,
>>> count, incr, flags);
>>> amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
>>> incr, flags);
>>> @@ -248,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
>>>
>>> /* copy commands needed */
>>> ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
>>> - (bo->shadow ? 2 : 1);
>>> + ((bo->tbo.type == ttm_bo_type_kernel) ? 2 : 1);
>>
>> And that here won't work and will allocate too much SDMA space.
>>
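One way to keep the original behaviour in amdgpu_vm_sdma_update() would be to look at the shadow pointer itself instead of the BO type, since the shadow can be absent (for example after it is freed for compute VMs, as in the hunk above). A sketch only, assuming the BO handed in is a page table allocated as amdgpu_bo_vm:

	struct amdgpu_bo *shadow = to_amdgpu_bo_vm(bo)->shadow;

	if (!p->pages_addr) {
		/* set page commands needed */
		if (shadow)
			amdgpu_vm_sdma_set_ptes(p, shadow, pe, addr,
						count, incr, flags);
		amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count, incr, flags);
		return 0;
	}

	/* copy commands needed */
	ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
	       (shadow ? 2 : 1);

	/* ... nptes computation and PTE conversion as before ... */

	if (shadow)
		amdgpu_vm_sdma_copy_ptes(p, shadow, pe, nptes);
	amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
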
>>>
>>> /* for padding */
>>> ndw -= 7;
>>> @@ -263,8 +264,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
>>> pte[i] |= flags;
>>> }
>>>
>>> - if (bo->shadow)
>>> - amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
>>> + if (bo->tbo.type == ttm_bo_type_kernel)
>> And that is wrong as well.
>>
>> Christian.
>>
>>> + amdgpu_vm_sdma_copy_ptes(p, to_amdgpu_bo_vm(bo)->shadow,
>>> + pe, nptes);
>>> amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
>>>
>>> pe += nptes * 8;
>>> --
>>> 2.31.1
>>>
>>