[PATCH 7/7] drm/amdgpu: do not allocate entries separately
Nirmoy
nirmodas at amd.com
Fri May 21 14:04:58 UTC 2021
On 5/21/21 3:01 PM, Christian König wrote:
> Am 21.05.21 um 14:45 schrieb Nirmoy Das:
>> Allocate PD/PT entries while allocating VM BOs and use that
>> instead of allocating those entries separately.
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 31 ++++++++++++++------------
>> 1 file changed, 17 insertions(+), 14 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 120e6b7a0286..4717f075a391 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -880,7 +880,12 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
>> bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
>> bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
>> AMDGPU_GEM_CREATE_CPU_GTT_USWC;
>> - bp.bo_ptr_size = sizeof(struct amdgpu_bo_vm);
>> + if (level < AMDGPU_VM_PTB)
>> + bp.bo_ptr_size = struct_size((*vmbo), entries,
>> + amdgpu_vm_num_entries(adev, level));
>> + else
>> + bp.bo_ptr_size = sizeof(struct amdgpu_bo_vm);
>> +
>
> Rather do it like this here:
>
> if (level < AMDGPU_VM_PTB)
> 	num_entries = amdgpu_vm_num_entries(...)
> else
> 	num_entries = 0;
>
> bp.bo_ptr_size = struct_size(....)
Sure.
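Something like this, then (untested sketch; the num_entries declaration is only for illustration, the rest follows the hunk above and your snippet):

	unsigned int num_entries;
	...
	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_num_entries(adev, level);
	else
		num_entries = 0;

	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);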
>
> If we have that calculation more than once then it might make sense to
> unify it in a function, but I don't think so off hand.
Currently, we only need this calculation in amdgpu_vm_pt_create().
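For reference, struct_size() with a count of 0 reduces to sizeof() of the containing struct (assuming entries is a flexible array at the end of struct amdgpu_bo_vm), so the single call gives the same size as the sizeof(struct amdgpu_bo_vm) branch in the original hunk. A quick userspace illustration with made-up stand-in types and a simplified struct_size, not the kernel's overflow-checked macro:

	#include <stdio.h>
	#include <stddef.h>

	/* Stand-in for a struct with a flexible "entries" array at the end;
	 * the field names are made up for illustration only. */
	struct demo_bo_vm {
		long fixed_part[4];
		int entries[];
	};

	/* Simplified stand-in for the kernel's struct_size() (no overflow checks). */
	#define struct_size(p, member, count) \
		(sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(count))

	int main(void)
	{
		struct demo_bo_vm *vmbo = NULL; /* only used inside sizeof, never dereferenced */

		/* count == 0: just the fixed part, i.e. sizeof(struct demo_bo_vm) */
		printf("count 0: %zu (sizeof: %zu)\n",
		       struct_size(vmbo, entries, 0), sizeof(*vmbo));

		/* count > 0: fixed part plus room for that many entries */
		printf("count 8: %zu\n", struct_size(vmbo, entries, 8));
		return 0;
	}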
Nirmoy
>
>
> Regards,
> Christian.
>
>> if (vm->use_cpu_for_update)
>> bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
>> @@ -954,19 +959,14 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>> struct amdgpu_bo_vm *pt;
>> int r;
>> - if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
>> - unsigned num_entries;
>> -
>> - num_entries = amdgpu_vm_num_entries(adev, cursor->level);
>> - entry->entries = kvmalloc_array(num_entries,
>> - sizeof(*entry->entries),
>> - GFP_KERNEL | __GFP_ZERO);
>> - if (!entry->entries)
>> - return -ENOMEM;
>> - }
>> -
>> - if (entry->base.bo)
>> + if (entry->base.bo) {
>> + if (cursor->level < AMDGPU_VM_PTB)
>> + entry->entries =
>> + to_amdgpu_bo_vm(entry->base.bo)->entries;
>> + else
>> + entry->entries = NULL;
>> return 0;
>> + }
>> r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate,
>> &pt);
>> if (r)
>> @@ -978,6 +978,10 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
>> pt_bo = &pt->bo;
>> pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
>> amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
>> + if (cursor->level < AMDGPU_VM_PTB)
>> + entry->entries = pt->entries;
>> + else
>> + entry->entries = NULL;
>> r = amdgpu_vm_clear_bo(adev, vm, pt_bo, immediate);
>> if (r)
>> @@ -1005,7 +1009,6 @@ static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
>> amdgpu_bo_unref(&to_amdgpu_bo_vm(entry->base.bo)->shadow);
>> amdgpu_bo_unref(&entry->base.bo);
>> }
>> - kvfree(entry->entries);
>> entry->entries = NULL;
>> }
>