[PATCH v4 1/2] drm/amdgpu: Fix amdgpu_vm_pt_free warning
Philip Yang
yangp at amd.com
Wed Sep 14 15:41:33 UTC 2022
On 2022-09-14 05:10, Christian König wrote:
> Am 13.09.22 um 22:05 schrieb Philip Yang:
>> Freeing a page table BO from a context where the vm resv lock is not
>> held generates the warning below.
>>
>> Add a pt_free_work to the vm to free page table BOs from the
>> vm->pt_freed list. Pass the vm resv unlocked status in from the page
>> table update caller; add the vm_bo entry to vm->pt_freed and schedule
>> pt_free_work when called with the vm resv unlocked.
>>
>> WARNING: CPU: 12 PID: 3238 at drivers/gpu/drm/ttm/ttm_bo.c:106 ttm_bo_set_bulk_move+0xa1/0xc0
>> Call Trace:
>> amdgpu_vm_pt_free+0x42/0xd0 [amdgpu]
>> amdgpu_vm_pt_free_dfs+0xb3/0xf0 [amdgpu]
>> amdgpu_vm_ptes_update+0x52d/0x850 [amdgpu]
>> amdgpu_vm_update_range+0x2a6/0x640 [amdgpu]
>> svm_range_unmap_from_gpus+0x110/0x300 [amdgpu]
>> svm_range_cpu_invalidate_pagetables+0x535/0x600 [amdgpu]
>> __mmu_notifier_invalidate_range_start+0x1cd/0x230
>> unmap_vmas+0x9d/0x140
>> unmap_region+0xa8/0x110
>>
>> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 +++
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 47 ++++++++++++++++++++---
>> 3 files changed, 51 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 59cac347baa3..27e6155053b6 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -2022,6 +2022,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>> spin_lock_init(&vm->invalidated_lock);
>> INIT_LIST_HEAD(&vm->freed);
>> INIT_LIST_HEAD(&vm->done);
>> + INIT_LIST_HEAD(&vm->pt_freed);
>> + INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
>> + spin_lock_init(&vm->pt_free_lock);
>> /* create scheduler entities for page table updates */
>> r = drm_sched_entity_init(&vm->immediate,
>> DRM_SCHED_PRIORITY_NORMAL,
>> @@ -2244,6 +2247,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>> amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
>> }
>> + cancel_work_sync(&vm->pt_free_work);
>
> We should probably not cancel the work here, but rather just wait for
> it to finish.
Good point. I will change it to flush_work() and move it outside of
amdgpu_bo_reserve(root): amdgpu_vm_pt_free_work() takes
amdgpu_bo_reserve(root) itself, so waiting for it while root is
reserved would deadlock.
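
Roughly what I have in mind for amdgpu_vm_fini() in v5 (an untested
sketch; the surrounding mapping teardown stays as it is today):

	/* Wait for the free worker before reserving root, because
	 * amdgpu_vm_pt_free_work() reserves the root BO itself.
	 */
	flush_work(&vm->pt_free_work);

	root = amdgpu_bo_ref(vm->root.bo);
	amdgpu_bo_reserve(root, true);
	/* ... existing mapping cleanup ... */
	amdgpu_vm_pt_free_root(adev, vm);
	amdgpu_bo_unreserve(root);
	amdgpu_bo_unref(&root);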
>
>> amdgpu_vm_pt_free_root(adev, vm);
>> amdgpu_bo_unreserve(root);
>> amdgpu_bo_unref(&root);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> index 9ecb7f663e19..b77fe838c327 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> @@ -276,6 +276,11 @@ struct amdgpu_vm {
>> /* BOs which are invalidated, has been updated in the PTs */
>> struct list_head done;
>> + /* PT BOs scheduled to free and fill with zero if vm_resv is not held */
>> + struct list_head pt_freed;
>> + struct work_struct pt_free_work;
>> + spinlock_t pt_free_lock;
>> +
>> /* contains the page directory */
>> struct amdgpu_vm_bo_base root;
>> struct dma_fence *last_update;
>> @@ -471,6 +476,7 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
>> int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
>> uint64_t start, uint64_t end,
>> uint64_t dst, uint64_t flags);
>> +void amdgpu_vm_pt_free_work(struct work_struct *work);
>> #if defined(CONFIG_DEBUG_FS)
>> void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> index 88de9f0d4728..c6f91731ecfb 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> @@ -624,12 +624,22 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
>> *
>> * @entry: PDE to free
>> */
>> -static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>> +static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry, bool unlocked)
>> {
>> struct amdgpu_bo *shadow;
>> if (!entry->bo)
>> return;
>> +
>> + if (unlocked) {
>> + spin_lock(&entry->vm->pt_free_lock);
>> + list_move(&entry->vm_status, &entry->vm->pt_freed);
>> + spin_unlock(&entry->vm->pt_free_lock);
>> +
>> + schedule_work(&entry->vm->pt_free_work);
>> + return;
>> + }
>> +
>
> I strongly prefer to handle this in amdgpu_vm_pt_free_dfs() or even
> in its caller instead, and keep this as the low-level free
> function.
Agree, will send out a v5 patch.
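
Roughly what the v5 would look like, with the deferral moved out of
amdgpu_vm_pt_free() and into amdgpu_vm_pt_free_dfs() (an untested
sketch):

	static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  struct amdgpu_vm_pt_cursor *start,
					  bool unlocked)
	{
		struct amdgpu_vm_pt_cursor cursor;
		struct amdgpu_vm_bo_base *entry;

		if (unlocked) {
			/* vm resv is not held, so queue the entries and
			 * let the worker free them with root reserved.
			 */
			spin_lock(&vm->pt_free_lock);
			for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
				list_move(&entry->vm_status, &vm->pt_freed);
			if (start)
				list_move(&start->entry->vm_status, &vm->pt_freed);
			spin_unlock(&vm->pt_free_lock);

			schedule_work(&vm->pt_free_work);
			return;
		}

		for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
			amdgpu_vm_pt_free(entry);

		if (start)
			amdgpu_vm_pt_free(start->entry);
	}

amdgpu_vm_pt_free() then stays the plain low-level free function, with
no unlocked parameter.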
Philip
>
>> shadow = amdgpu_bo_shadowed(entry->bo);
>> if (shadow) {
>> ttm_bo_set_bulk_move(&shadow->tbo, NULL);
>> @@ -641,6 +651,29 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>> amdgpu_bo_unref(&entry->bo);
>> }
>> +void amdgpu_vm_pt_free_work(struct work_struct *work)
>
> I think we could make this one static if we introduce
> amdgpu_vm_pt_init()/amdgpu_vm_pt_fini() functions. But that is not a
> must-have.
>
> Christian.
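
If we go that way, the helpers could look something like this (a
sketch; amdgpu_vm_pt_init()/amdgpu_vm_pt_fini() are the hypothetical
names from your suggestion):

	/* amdgpu_vm_pt.c */
	static void amdgpu_vm_pt_free_work(struct work_struct *work);

	void amdgpu_vm_pt_init(struct amdgpu_vm *vm)
	{
		INIT_LIST_HEAD(&vm->pt_freed);
		INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
		spin_lock_init(&vm->pt_free_lock);
	}

	void amdgpu_vm_pt_fini(struct amdgpu_vm *vm)
	{
		/* The worker reserves the root BO, so call this before
		 * the caller reserves root for the final teardown.
		 */
		flush_work(&vm->pt_free_work);
	}

amdgpu_vm_init()/amdgpu_vm_fini() would call these instead of touching
pt_freed/pt_free_work directly, and the work function no longer needs
a declaration in amdgpu_vm.h.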
>
>> +{
>> + struct amdgpu_vm_bo_base *entry, *next;
>> + struct amdgpu_vm *vm;
>> + struct amdgpu_bo *root;
>> + LIST_HEAD(pt_freed);
>> +
>> + vm = container_of(work, struct amdgpu_vm, pt_free_work);
>> +
>> + spin_lock(&vm->pt_free_lock);
>> + list_splice_init(&vm->pt_freed, &pt_freed);
>> + spin_unlock(&vm->pt_free_lock);
>> +
>> + root = amdgpu_bo_ref(vm->root.bo);
>> + amdgpu_bo_reserve(root, true);
>> +
>> + list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
>> + amdgpu_vm_pt_free(entry, false);
>> +
>> + amdgpu_bo_unreserve(root);
>> + amdgpu_bo_unref(&root);
>> +}
>> +
>> /**
>> * amdgpu_vm_pt_free_dfs - free PD/PT levels
>> *
>> @@ -652,16 +685,17 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>> */
>> static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
>> struct amdgpu_vm *vm,
>> - struct amdgpu_vm_pt_cursor *start)
>> + struct amdgpu_vm_pt_cursor *start,
>> + bool unlocked)
>> {
>> struct amdgpu_vm_pt_cursor cursor;
>> struct amdgpu_vm_bo_base *entry;
>> for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
>> - amdgpu_vm_pt_free(entry);
>> + amdgpu_vm_pt_free(entry, unlocked);
>> if (start)
>> - amdgpu_vm_pt_free(start->entry);
>> + amdgpu_vm_pt_free(start->entry, unlocked);
>> }
>> /**
>> @@ -673,7 +707,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
>> */
>> void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>> {
>> - amdgpu_vm_pt_free_dfs(adev, vm, NULL);
>> + amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
>> }
>> /**
>> @@ -966,7 +1000,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
>> if (cursor.entry->bo) {
>> params->table_freed = true;
>> amdgpu_vm_pt_free_dfs(adev, params->vm,
>> - &cursor);
>> + &cursor,
>> + params->unlocked);
>> }
>> amdgpu_vm_pt_next(adev, &cursor);
>> }
>