[PATCH 1/1] drm/amdgpu: fix amdgpu_vm_pt_free warning

Philip Yang yangp at amd.com
Mon Sep 12 13:59:48 UTC 2022


On 2022-09-09 16:01, Felix Kuehling wrote:
> Am 2022-09-09 um 14:25 schrieb Philip Yang:
>> Freeing a page table BO from a context where the vm resv is not
>> locked generates the warnings below.
>>
>> Add a free_work to the vm_bo base of each page table BO, pass the
>> vm resv lock status down from the page table update caller, and
>> schedule the work instead of freeing directly when called with the
>> vm resv unlocked.
>>
>>   WARNING: CPU: 12 PID: 3238 at
>>   drivers/gpu/drm/ttm/ttm_bo.c:106 ttm_bo_set_bulk_move+0xa1/0xc0
>>   Call Trace:
>>    amdgpu_vm_pt_free+0x42/0xd0 [amdgpu]
>>    amdgpu_vm_pt_free_dfs+0xb3/0xf0 [amdgpu]
>>    amdgpu_vm_ptes_update+0x52d/0x850 [amdgpu]
>>    amdgpu_vm_update_range+0x2a6/0x640 [amdgpu]
>>    svm_range_unmap_from_gpus+0x110/0x300 [amdgpu]
>>    svm_range_cpu_invalidate_pagetables+0x535/0x600 [amdgpu]
>>    __mmu_notifier_invalidate_range_start+0x1cd/0x230
>>    unmap_vmas+0x9d/0x140
>>    unmap_region+0xa8/0x110
>>
>>   WARNING: CPU: 0 PID: 1475 at
>>   drivers/dma-buf/dma-resv.c:483 dma_resv_iter_next
>>   Call Trace:
>>    dma_resv_iter_first+0x43/0xa0
>>    amdgpu_vm_sdma_update+0x69/0x2d0 [amdgpu]
>>    amdgpu_vm_ptes_update+0x29c/0x870 [amdgpu]
>>    amdgpu_vm_update_range+0x2f6/0x6c0 [amdgpu]
>>    svm_range_unmap_from_gpus+0x115/0x300 [amdgpu]
>>    svm_range_cpu_invalidate_pagetables+0x510/0x5e0 [amdgpu]
>>    __mmu_notifier_invalidate_range_start+0x1d3/0x230
>>    unmap_vmas+0x140/0x150
>>    unmap_region+0xa8/0x110
>>
>> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h    |  3 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 40 +++++++++++++++++++----
>>   2 files changed, 37 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> index 9ecb7f663e19..ac7cd2c738e5 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> @@ -150,6 +150,9 @@ struct amdgpu_vm_bo_base {
>>         /* protected by the BO being reserved */
>>       bool                moved;
>> +
>> +    /* work to free pt bo if vm resv is not held */
>> +    struct work_struct              free_work;
>>   };
>>
>>   /* provided by hw blocks that can write ptes, e.g., sdma */
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> index 88de9f0d4728..e6f6d7e6368f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> @@ -37,6 +37,8 @@ struct amdgpu_vm_pt_cursor {
>>       unsigned int level;
>>   };
>>
>> +static void amdgpu_vm_pt_free_work(struct work_struct *work);
>> +
>>   /**
>>    * amdgpu_vm_pt_level_shift - return the addr shift for each level
>>    *
>> @@ -607,6 +609,7 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
>>       pt_bo = &pt->bo;
>>       pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
>>       amdgpu_vm_bo_base_init(entry, vm, pt_bo);
>> +    INIT_WORK(&entry->free_work, amdgpu_vm_pt_free_work);
>>       r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
>>       if (r)
>>           goto error_free_pt;
>> @@ -624,23 +627,46 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
>>    *
>>    * @entry: PDE to free
>>    */
>> -static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>> +static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry, bool unlocked)
>>   {
>>       struct amdgpu_bo *shadow;
>>
>>       if (!entry->bo)
>>           return;
>> +
>> +    if (unlocked) {
>> +        schedule_work(&entry->free_work);
>> +        return;
>> +    }
>> +
>>       shadow = amdgpu_bo_shadowed(entry->bo);
>>       if (shadow) {
>>           ttm_bo_set_bulk_move(&shadow->tbo, NULL);
>>           amdgpu_bo_unref(&shadow);
>>       }
>>       ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
>> +
>>       entry->bo->vm_bo = NULL;
>>       list_del(&entry->vm_status);
>>       amdgpu_bo_unref(&entry->bo);
>>   }
>>
>> +static void amdgpu_vm_pt_free_work(struct work_struct *work)
>> +{
>> +    struct amdgpu_vm_bo_base *entry;
>> +    struct amdgpu_bo *root;
>> +
>> +    entry = container_of(work, struct amdgpu_vm_bo_base, free_work);
>> +
>> +    root = amdgpu_bo_ref(entry->vm->root.bo);
>> +    amdgpu_bo_reserve(root, true);
>> +
>> +    amdgpu_vm_pt_free(entry, true);
>
> Doesn't this just cause the work to be scheduled again, rather than 
> actually freeing the BO?

You are right: with unlocked still true, the worker just takes the
schedule_work() path again instead of actually freeing the BO. I will
send a v2 using Christian's suggestion to use a single worker in the
vm; a rough sketch of that direction is below.
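
Roughly: one worker owned by the vm drains a list of page table BOs
under the root reservation, and the unlocked path only queues entries
on that list. This is a sketch only; the field names (pt_freed,
pt_free_work, status_lock) and the exact locking are placeholders,
not the final patch.

    /* Sketch: amdgpu_vm gains
     *   struct list_head   pt_freed;      entries queued for freeing
     *   struct work_struct pt_free_work;  single worker per vm
     * protected by a vm spinlock, called status_lock here.
     * INIT_WORK would happen once in amdgpu_vm_init.
     */
    static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry,
                                  bool unlocked)
    {
            struct amdgpu_vm *vm = entry->vm;

            if (!entry->bo)
                    return;

            if (unlocked) {
                    /* Queue for the vm-wide worker instead of a
                     * per-BO work that would only reschedule itself.
                     */
                    spin_lock(&vm->status_lock);
                    list_move(&entry->vm_status, &vm->pt_freed);
                    spin_unlock(&vm->status_lock);
                    schedule_work(&vm->pt_free_work);
                    return;
            }

            /* ... existing locked free path unchanged ... */
    }

    static void amdgpu_vm_pt_free_work(struct work_struct *work)
    {
            struct amdgpu_vm_bo_base *entry, *next;
            struct amdgpu_vm *vm;
            LIST_HEAD(pt_freed);

            vm = container_of(work, struct amdgpu_vm, pt_free_work);

            /* Splice the whole list so the spinlock is not held
             * while reserving and freeing.
             */
            spin_lock(&vm->status_lock);
            list_splice_init(&vm->pt_freed, &pt_freed);
            spin_unlock(&vm->status_lock);

            /* The worker runs outside the MMU notifier, so taking
             * the root resv here is allowed.
             */
            amdgpu_bo_reserve(vm->root.bo, true);

            list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
                    amdgpu_vm_pt_free(entry, false);

            amdgpu_bo_unreserve(vm->root.bo);
    }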

Thanks,

Philip

>
> Regards,
>   Felix
>
>
>> +
>> +    amdgpu_bo_unreserve(root);
>> +    amdgpu_bo_unref(&root);
>> +}
>> +
>>   /**
>>    * amdgpu_vm_pt_free_dfs - free PD/PT levels
>>    *
>> @@ -652,16 +678,17 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>>    */
>>   static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
>>                     struct amdgpu_vm *vm,
>> -                  struct amdgpu_vm_pt_cursor *start)
>> +                  struct amdgpu_vm_pt_cursor *start,
>> +                  bool unlocked)
>>   {
>>       struct amdgpu_vm_pt_cursor cursor;
>>       struct amdgpu_vm_bo_base *entry;
>>
>>       for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
>> -        amdgpu_vm_pt_free(entry);
>> +        amdgpu_vm_pt_free(entry, unlocked);
>>
>>       if (start)
>> -        amdgpu_vm_pt_free(start->entry);
>> +        amdgpu_vm_pt_free(start->entry, unlocked);
>>   }
>>
>>   /**
>> @@ -673,7 +700,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
>>    */
>>   void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>   {
>> -    amdgpu_vm_pt_free_dfs(adev, vm, NULL);
>> +    amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
>>   }
>>
>>   /**
>> @@ -966,7 +993,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
>>                   if (cursor.entry->bo) {
>>                       params->table_freed = true;
>>                       amdgpu_vm_pt_free_dfs(adev, params->vm,
>> -                                  &cursor);
>> +                                  &cursor,
>> +                                  params->unlocked);
>>                   }
>>                   amdgpu_vm_pt_next(adev, &cursor);
>>               }
