[PATCH v2 1/1] drm/amdgpu: fix amdgpu_vm_pt_free warning
Philip Yang
yangp at amd.com
Tue Sep 13 13:14:12 UTC 2022
On 2022-09-12 14:59, Christian König wrote:
>
>
> On 2022-09-12 at 20:48, Felix Kuehling wrote:
>> On 2022-09-12 at 13:58, Philip Yang wrote:
>>> Freeing a page table BO from a context where the vm resv lock is not
>>> held generates the warnings below.
>>>
>>> Add a bo_free_work to the vm to free page table BOs from the
>>> vm->bo_freed list. Pass the vm resv unlock status from the page table
>>> update caller; add the vm_bo entry to vm->bo_freed and schedule the
>>> work if called with the vm resv unlocked.
>>>
>>> WARNING: CPU: 12 PID: 3238 at drivers/gpu/drm/ttm/ttm_bo.c:106 ttm_bo_set_bulk_move+0xa1/0xc0
>>> Call Trace:
>>> amdgpu_vm_pt_free+0x42/0xd0 [amdgpu]
>>> amdgpu_vm_pt_free_dfs+0xb3/0xf0 [amdgpu]
>>> amdgpu_vm_ptes_update+0x52d/0x850 [amdgpu]
>>> amdgpu_vm_update_range+0x2a6/0x640 [amdgpu]
>>> svm_range_unmap_from_gpus+0x110/0x300 [amdgpu]
>>> svm_range_cpu_invalidate_pagetables+0x535/0x600 [amdgpu]
>>> __mmu_notifier_invalidate_range_start+0x1cd/0x230
>>> unmap_vmas+0x9d/0x140
>>> unmap_region+0xa8/0x110
>>>
>>> WARNING: CPU: 0 PID: 1475 at drivers/dma-buf/dma-resv.c:483 dma_resv_iter_next
>>> Call Trace:
>>> dma_resv_iter_first+0x43/0xa0
>>> amdgpu_vm_sdma_update+0x69/0x2d0 [amdgpu]
>>> amdgpu_vm_ptes_update+0x29c/0x870 [amdgpu]
>>> amdgpu_vm_update_range+0x2f6/0x6c0 [amdgpu]
>>> svm_range_unmap_from_gpus+0x115/0x300 [amdgpu]
>>> svm_range_cpu_invalidate_pagetables+0x510/0x5e0 [amdgpu]
>>> __mmu_notifier_invalidate_range_start+0x1d3/0x230
>>> unmap_vmas+0x140/0x150
>>> unmap_region+0xa8/0x110
>>>
>>> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
>>> ---
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 +++
>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 51 ++++++++++++++++++++---
>>> 3 files changed, 55 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> index 59cac347baa3..add3b08ffde9 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> @@ -2022,6 +2022,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>> spin_lock_init(&vm->invalidated_lock);
>>> INIT_LIST_HEAD(&vm->freed);
>>> INIT_LIST_HEAD(&vm->done);
>>> + INIT_LIST_HEAD(&vm->bo_freed);
>>> + INIT_WORK(&vm->bo_free_work, amdgpu_vm_pt_free_work);
>>> + spin_lock_init(&vm->bo_free_lock);
>>> /* create scheduler entities for page table updates */
>>> r = drm_sched_entity_init(&vm->immediate,
>>> DRM_SCHED_PRIORITY_NORMAL,
>>> @@ -2244,6 +2247,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>> amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
>>> }
>>> + cancel_work_sync(&vm->bo_free_work);
>>> amdgpu_vm_pt_free_root(adev, vm);
>>> amdgpu_bo_unreserve(root);
>>> amdgpu_bo_unref(&root);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>> index 9ecb7f663e19..c8c59c66ca2f 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>> @@ -276,6 +276,11 @@ struct amdgpu_vm {
>>> /* BOs which are invalidated, has been updated in the PTs */
>>> struct list_head done;
>>> + /* PT BOs scheduled to be freed and filled with zero if vm resv is not held */
>>> + struct list_head bo_freed;
>>> + struct work_struct bo_free_work;
>>> + spinlock_t bo_free_lock;
>>> +
>>> /* contains the page directory */
>>> struct amdgpu_vm_bo_base root;
>>> struct dma_fence *last_update;
>>> @@ -471,6 +476,7 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
>>> int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
>>> uint64_t start, uint64_t end,
>>> uint64_t dst, uint64_t flags);
>>> +void amdgpu_vm_pt_free_work(struct work_struct *work);
>>> #if defined(CONFIG_DEBUG_FS)
>>> void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>>> index 88de9f0d4728..12471ef126a9 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>>> @@ -624,12 +624,22 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
>>> *
>>> * @entry: PDE to free
>>> */
>>> -static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>>> +static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry, bool unlocked)
>>> {
>>> struct amdgpu_bo *shadow;
>>> if (!entry->bo)
>>> return;
>>> +
>>> + if (unlocked) {
>>> + spin_lock(&entry->vm->bo_free_lock);
>>> + list_move(&entry->vm_status, &entry->vm->bo_freed);
>>> + spin_unlock(&entry->vm->bo_free_lock);
>>> +
>>> + schedule_work(&entry->vm->bo_free_work);
>>> + return;
>>> + }
>>> +
>>> shadow = amdgpu_bo_shadowed(entry->bo);
>>> if (shadow) {
>>> ttm_bo_set_bulk_move(&shadow->tbo, NULL);
>>> @@ -641,6 +651,33 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>>> amdgpu_bo_unref(&entry->bo);
>>> }
>>> +void amdgpu_vm_pt_free_work(struct work_struct *work)
>>> +{
>>> + struct amdgpu_vm_bo_base *entry;
>>> + struct amdgpu_vm *vm;
>>> + struct amdgpu_bo *root;
>>> +
>>> + vm = container_of(work, struct amdgpu_vm, bo_free_work);
>>> +
>>> + root = amdgpu_bo_ref(vm->root.bo);
>>> + amdgpu_bo_reserve(root, true);
>>> +
>>> + spin_lock(&vm->bo_free_lock);
>>> + while (!list_empty(&vm->bo_freed)) {
>>> + entry = list_first_entry(&vm->bo_freed, struct amdgpu_vm_bo_base,
>>> + vm_status);
>>
>> I don't see where you're removing the entry from the list. You need a
>> list_del_init here.
>>
>> If you're relying on the list_del call in amdgpu_vm_pt_free, that's
>> not safe because it's outside the spinlock. This could end up
>> corrupting the list if another thread is concurrently adding to the
>> same list.
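
For reference, a minimal untested sketch of the list_del_init variant
Felix describes, unlinking each entry while bo_free_lock is still held:

    spin_lock(&vm->bo_free_lock);
    while (!list_empty(&vm->bo_freed)) {
            entry = list_first_entry(&vm->bo_freed,
                                     struct amdgpu_vm_bo_base, vm_status);
            /* Unlink under the lock so a concurrent list_move() in
             * amdgpu_vm_pt_free() cannot corrupt the list.
             */
            list_del_init(&entry->vm_status);
            spin_unlock(&vm->bo_free_lock);

            amdgpu_vm_pt_free(entry, false);

            spin_lock(&vm->bo_free_lock);
    }
    spin_unlock(&vm->bo_free_lock);
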
>
> It's much easier to use list_splice() to move the list to a local head
> and then traverse it with list_for_each_entry_safe() and free up
> everything.
>
Using list_del_init can fix the race, but it is simpler to use a local
list head and then traverse it, roughly as sketched below. Will send out
a new patch.
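
An untested sketch of that approach, reusing the v2 names bo_freed,
bo_free_lock and bo_free_work:

    void amdgpu_vm_pt_free_work(struct work_struct *work)
    {
            struct amdgpu_vm_bo_base *entry, *next;
            struct amdgpu_vm *vm;
            LIST_HEAD(pt_freed);

            vm = container_of(work, struct amdgpu_vm, bo_free_work);

            /* Splice everything onto a local head under the lock, then
             * free the entries without holding bo_free_lock.
             */
            spin_lock(&vm->bo_free_lock);
            list_splice_init(&vm->bo_freed, &pt_freed);
            spin_unlock(&vm->bo_free_lock);

            /* Syncing the work in amdgpu_vm_fini() before freeing the
             * root ensures vm->root.bo is still valid here.
             */
            amdgpu_bo_reserve(vm->root.bo, true);

            list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
                    amdgpu_vm_pt_free(entry, false);

            amdgpu_bo_unreserve(vm->root.bo);
    }
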
Regards,
Philip
> Regards,
> Christian.
>
>>
>> Regards,
>> Felix
>>
>>
>>> + spin_unlock(&vm->bo_free_lock);
>>> +
>>> + amdgpu_vm_pt_free(entry, false);
>>> +
>>> + spin_lock(&vm->bo_free_lock);
>>> + }
>>> + spin_unlock(&vm->bo_free_lock);
>>> +
>>> + amdgpu_bo_unreserve(root);
>>> + amdgpu_bo_unref(&root);
>>> +}
>>> +
>>> /**
>>> * amdgpu_vm_pt_free_dfs - free PD/PT levels
>>> *
>>> @@ -652,16 +689,17 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>>> */
>>> static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
>>> struct amdgpu_vm *vm,
>>> - struct amdgpu_vm_pt_cursor *start)
>>> + struct amdgpu_vm_pt_cursor *start,
>>> + bool unlocked)
>>> {
>>> struct amdgpu_vm_pt_cursor cursor;
>>> struct amdgpu_vm_bo_base *entry;
>>> for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
>>> - amdgpu_vm_pt_free(entry);
>>> + amdgpu_vm_pt_free(entry, unlocked);
>>> if (start)
>>> - amdgpu_vm_pt_free(start->entry);
>>> + amdgpu_vm_pt_free(start->entry, unlocked);
>>> }
>>> /**
>>> @@ -673,7 +711,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
>>> */
>>> void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>> {
>>> - amdgpu_vm_pt_free_dfs(adev, vm, NULL);
>>> + amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
>>> }
>>> /**
>>> @@ -966,7 +1004,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
>>> if (cursor.entry->bo) {
>>> params->table_freed = true;
>>> amdgpu_vm_pt_free_dfs(adev, params->vm,
>>> - &cursor);
>>> + &cursor,
>>> + params->unlocked);
>>> }
>>> amdgpu_vm_pt_next(adev, &cursor);
>>> }
>