[PATCH v5 1/1] drm/amdgpu: Fix amdgpu_vm_pt_free warning
Christian König
christian.koenig at amd.com
Thu Sep 15 14:54:03 UTC 2022
On 15.09.22 at 16:39, Philip Yang wrote:
>
> On 2022-09-15 02:21, Christian König wrote:
>> On 14.09.22 at 19:45, Felix Kuehling wrote:
>>> On 2022-09-14 at 12:08, Philip Yang wrote:
>>>> Freeing a page table BO from a context where the vm resv is not
>>>> held generates the warning below.
>>>>
>>>> Add a pt_free_work to the vm which frees page table BOs from the
>>>> vm->pt_freed list. Pass the vm resv lock status down from the page
>>>> table update callers; when called with the vm resv unlocked, add the
>>>> vm_bo entry to vm->pt_freed and schedule pt_free_work instead of
>>>> freeing the BO directly.
>>>>
>>>> WARNING: CPU: 12 PID: 3238 at
>>>> drivers/gpu/drm/ttm/ttm_bo.c:106 ttm_bo_set_bulk_move+0xa1/0xc0
>>>> Call Trace:
>>>> amdgpu_vm_pt_free+0x42/0xd0 [amdgpu]
>>>> amdgpu_vm_pt_free_dfs+0xb3/0xf0 [amdgpu]
>>>> amdgpu_vm_ptes_update+0x52d/0x850 [amdgpu]
>>>> amdgpu_vm_update_range+0x2a6/0x640 [amdgpu]
>>>> svm_range_unmap_from_gpus+0x110/0x300 [amdgpu]
>>>> svm_range_cpu_invalidate_pagetables+0x535/0x600 [amdgpu]
>>>> __mmu_notifier_invalidate_range_start+0x1cd/0x230
>>>> unmap_vmas+0x9d/0x140
>>>> unmap_region+0xa8/0x110
>>>>
>>>> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
>>>> ---
>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 +++
>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 ++++
>>>> drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 41 +++++++++++++++++++++--
>>>> 3 files changed, 49 insertions(+), 3 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>>> index 59cac347baa3..20cfc8c9635b 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>>> @@ -2022,6 +2022,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>>> spin_lock_init(&vm->invalidated_lock);
>>>> INIT_LIST_HEAD(&vm->freed);
>>>> INIT_LIST_HEAD(&vm->done);
>>>> + INIT_LIST_HEAD(&vm->pt_freed);
>>>> + INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
>>>> + spin_lock_init(&vm->pt_free_lock);
>>>> /* create scheduler entities for page table updates */
>>>> r = drm_sched_entity_init(&vm->immediate,
>>>> DRM_SCHED_PRIORITY_NORMAL,
>>>> @@ -2223,6 +2226,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>>> amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
>>>> + flush_work(&vm->pt_free_work);
>>>> +
>>>> root = amdgpu_bo_ref(vm->root.bo);
>>>> amdgpu_bo_reserve(root, true);
>>>> amdgpu_vm_set_pasid(adev, vm, 0);
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>>> index 9ecb7f663e19..b77fe838c327 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>>> @@ -276,6 +276,11 @@ struct amdgpu_vm {
>>>> /* BOs which are invalidated, has been updated in the PTs */
>>>> struct list_head done;
>>>> + /* PT BOs scheduled to be freed and zeroed when the vm resv is not held */
>>>> + struct list_head pt_freed;
>>>> + struct work_struct pt_free_work;
>>>> + spinlock_t pt_free_lock;
>>>> +
>>>> /* contains the page directory */
>>>> struct amdgpu_vm_bo_base root;
>>>> struct dma_fence *last_update;
>>>> @@ -471,6 +476,7 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
>>>> int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
>>>> uint64_t start, uint64_t end,
>>>> uint64_t dst, uint64_t flags);
>>>> +void amdgpu_vm_pt_free_work(struct work_struct *work);
>>>> #if defined(CONFIG_DEBUG_FS)
>>>> void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>>>> index 88de9f0d4728..c16579471f22 100644
>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>>>> @@ -641,6 +641,27 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>>>> amdgpu_bo_unref(&entry->bo);
>>>> }
>>>> +void amdgpu_vm_pt_free_work(struct work_struct *work)
>>>> +{
>>>> + struct amdgpu_vm_bo_base *entry, *next;
>>>> + struct amdgpu_vm *vm;
>>>> + LIST_HEAD(pt_freed);
>>>> +
>>>> + vm = container_of(work, struct amdgpu_vm, pt_free_work);
>>>> +
>>>> + spin_lock(&vm->pt_free_lock);
>>>> + list_splice_init(&vm->pt_freed, &pt_freed);
>>>> + spin_unlock(&vm->pt_free_lock);
>>>> +
>>>> + /* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid */
>>>> + amdgpu_bo_reserve(vm->root.bo, true);
>>>> +
>>>> + list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
>>>> + amdgpu_vm_pt_free(entry);
>>>> +
>>>> + amdgpu_bo_unreserve(vm->root.bo);
>>>> +}
>>>> +
>>>> /**
>>>> * amdgpu_vm_pt_free_dfs - free PD/PT levels
>>>> *
>>>> @@ -652,11 +673,24 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
>>>> */
>>>> static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
>>>> struct amdgpu_vm *vm,
>>>> - struct amdgpu_vm_pt_cursor *start)
>>>> + struct amdgpu_vm_pt_cursor *start,
>>>> + bool unlocked)
>>>> {
>>>> struct amdgpu_vm_pt_cursor cursor;
>>>> struct amdgpu_vm_bo_base *entry;
>>>> + if (unlocked) {
>>>> + spin_lock(&vm->pt_free_lock);
>>>> + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
>>>> + list_move(&entry->vm_status, &vm->pt_freed);
>>>> +
>>>> + if (start)
>>>> + list_move(&start->entry->vm_status, &vm->pt_freed);
>>>> + spin_unlock(&vm->pt_free_lock);
>>>
>>> Question for Christian: list_move will take the entry off another
>>> vm_status list (evicted, idle, invalidated, relocated). I don't
>>> think this is safe without holding a reservation lock in most cases.
>>> Since the point here is to maintain a list of PTs to be freed
>>> without locking a reservation, is the vm_status really the right way
>>> to do this?
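To make the concern concrete, here is a rough sketch of the race (the
state list on the left is only an example; just the locks and lists on
the right come from the patch):

/*
 * CPU A: resv lock held,               CPU B: unlocked free path,
 *        pt_free_lock not taken               pt_free_lock only
 *
 * list_move(&entry->vm_status,         spin_lock(&vm->pt_free_lock);
 *           &vm->relocated);           list_move(&entry->vm_status,
 *                                                &vm->pt_freed);
 *                                      spin_unlock(&vm->pt_free_lock);
 *
 * Both sides modify entry->vm_status and the neighbouring list nodes
 * without a common lock, so the state lists can become corrupted.
 */
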
>>
>> Oh, good point. This means we need to protect the whole state machine
>> with a spinlock or use a separate deleted list.
>>
>> I don't see how we can easily use a separate list here, but we
>> already have the invalidated_lock; maybe we should rename it to
>> status_lock and use it for all cases.
>
> Renaming invalidated_lock to status_lock and using it for the
> vm->pt_freed list works nicely; then we don't need a separate
> vm->pt_free_lock.
>
> Right now invalidated_lock only protects the vm->invalidated and
> vm->done lists, while the other vm lists (evicted, relocated, moved,
> idle, freed) are protected by the reservation lock. Do we need to
> protect all vm list operations with status_lock as well, so that it is
> safe to take an entry off a vm_status list from an unlocked context?
Yes, that's probably the best approach.
In theory we could skip some states because those state changes only
happen with the resv lock held, but I think it's better to be safe than
sorry here.
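
As a rough sketch only (status_lock and the helper names below are
placeholders, not something from this patch), the idea is to route
every vm_status list move through one small helper which takes a common
spinlock, so entries can be taken off any state list without holding
the VM reservation:

static void amdgpu_vm_bo_move(struct amdgpu_vm *vm,
			      struct amdgpu_vm_bo_base *vm_bo,
			      struct list_head *list)
{
	/* status_lock is assumed to protect every vm_status list */
	spin_lock(&vm->status_lock);
	list_move(&vm_bo->vm_status, list);
	spin_unlock(&vm->status_lock);
}

/* e.g. the existing state transitions would become thin wrappers */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	amdgpu_vm_bo_move(vm_bo->vm, vm_bo, &vm_bo->vm->relocated);
}

The unlocked path in amdgpu_vm_pt_free_dfs() would then take the same
status_lock instead of pt_free_lock around its list_move() calls before
scheduling pt_free_work, while the resv lock keeps serializing the
actual page table updates.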
Christian.
>
> Regards,
>
> Philip
>
>>
>> Regards,
>> Christian.
>>
>>>
>>> Regards,
>>> Felix
>>>
>>>
>>>> + schedule_work(&vm->pt_free_work);
>>>> + return;
>>>> + }
>>>> +
>>>> for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
>>>> amdgpu_vm_pt_free(entry);
>>>> @@ -673,7 +707,7 @@ static void amdgpu_vm_pt_free_dfs(struct
>>>> amdgpu_device *adev,
>>>> */
>>>> void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct
>>>> amdgpu_vm *vm)
>>>> {
>>>> - amdgpu_vm_pt_free_dfs(adev, vm, NULL);
>>>> + amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
>>>> }
>>>> /**
>>>> @@ -966,7 +1000,8 @@ int amdgpu_vm_ptes_update(struct
>>>> amdgpu_vm_update_params *params,
>>>> if (cursor.entry->bo) {
>>>> params->table_freed = true;
>>>> amdgpu_vm_pt_free_dfs(adev, params->vm,
>>>> - &cursor);
>>>> + &cursor,
>>>> + params->unlocked);
>>>> }
>>>> amdgpu_vm_pt_next(adev, &cursor);
>>>> }
>>