[PATCH 1/1] drm/amdgpu: cleanup pasid handling

Das, Nirmoy nirmoy.das at amd.com
Wed Jun 23 08:43:11 UTC 2021


On 6/23/2021 10:23 AM, Christian König wrote:
> Am 23.06.21 um 09:56 schrieb Nirmoy Das:
>> Cleanup code related to vm pasid by adding helper functions.
>> Also replace the idr with an xarray, as we actually need hash functionality.
>
> That looks quite a bit better than before, but I think we should 
> approach it differently.
>
> First of all, make a patch which moves amdgpu_pasid_free() outside of 
> the VM code.
>
> We don't allocate the pasid inside the VM code for good reasons, so we 
> shouldn't free it there either.
>
> Then, in a second patch, add a function amdgpu_vm_set_pasid(adev, vm, 
> pasid).
>
> When the pasid is zero, we remove the VM from the xarray; otherwise we 
> update the entry.


Thanks, will do that.
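
Something along these lines, I guess (a rough, untested sketch on top of
the xarray conversion; naming and exact semantics still to be settled):

/* Set the pasid -> VM mapping, or clear it when pasid == 0. */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid)
{
	int r;

	if (vm->pasid == pasid)
		return 0;

	/* Drop the old mapping first, if there is one. */
	if (vm->pasid) {
		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
		if (r < 0)
			return r;

		vm->pasid = 0;
	}

	/* Install the new mapping unless the caller only wants to clear it. */
	if (pasid) {
		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
					GFP_KERNEL));
		if (r < 0)
			return r;

		vm->pasid = pasid;
	}

	return 0;
}

amdgpu_vm_init()/amdgpu_vm_make_compute()/amdgpu_vm_fini() would then just
call this instead of touching the xarray and vm->pasid directly, and the
amdgpu_pasid_free() call stays with whoever allocated the pasid.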


Nirmoy

>
> Thanks,
> Christian.
>
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 130 ++++++++++++-------------
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |   3 +-
>>   2 files changed, 62 insertions(+), 71 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 63975bda8e76..abba1e2de264 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -87,6 +87,45 @@ struct amdgpu_prt_cb {
>>       struct dma_fence_cb cb;
>>   };
>>
>> +static int amdgpu_vm_pasid_insert(struct amdgpu_device *adev,
>> +                  struct amdgpu_vm *vm,
>> +                  unsigned long pasid,
>> +                  unsigned int *vm_pasid)
>> +{
>> +    unsigned long flags;
>> +    int r;
>> +
>> +    if (!pasid)
>> +        return 0;
>> +
>> +    xa_lock_irqsave(&adev->vm_manager.pasids, flags);
>> +    r = xa_err(__xa_store(&adev->vm_manager.pasids, pasid, vm, GFP_ATOMIC));
>> +    xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
>> +    if (r < 0)
>> +        return r;
>> +    if (vm_pasid)
>> +        *vm_pasid = pasid;
>> +
>> +    return 0;
>> +}
>> +
>> +static void amdgpu_vm_pasid_remove(struct amdgpu_device *adev,
>> +                   unsigned long pasid,
>> +                   unsigned int *vm_pasid)
>> +{
>> +    unsigned long flags;
>> +
>> +    if (!pasid)
>> +        return;
>> +
>> +    xa_lock_irqsave(&adev->vm_manager.pasids, flags);
>> +    __xa_erase(&adev->vm_manager.pasids, pasid);
>> +    xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
>> +
>> +    if (vm_pasid)
>> +        *vm_pasid = 0;
>> +}
>> +
>>   /*
>>    * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
>>    * happens while holding this lock anywhere to prevent deadlocks when
>> @@ -2940,18 +2979,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
>>         amdgpu_bo_unreserve(vm->root.bo);
>>
>> -    if (pasid) {
>> -        unsigned long flags;
>> -
>> -        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
>> -        r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
>> -                  GFP_ATOMIC);
>> -        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
>> -        if (r < 0)
>> -            goto error_free_root;
>> -
>> -        vm->pasid = pasid;
>> -    }
>> +    r = amdgpu_vm_pasid_insert(adev, vm, pasid, &vm->pasid);
>> +    if (r)
>> +        goto error_free_root;
>>
>>       INIT_KFIFO(vm->faults);
>>
>> @@ -3038,19 +3068,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>       r = amdgpu_vm_check_clean_reserved(adev, vm);
>>       if (r)
>>           goto unreserve_bo;
>> -
>> -    if (pasid) {
>> -        unsigned long flags;
>> -
>> -        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
>> -        r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
>> -                  GFP_ATOMIC);
>> -        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
>> -
>> -        if (r == -ENOSPC)
>> -            goto unreserve_bo;
>> -        r = 0;
>> -    }
>> +    r = amdgpu_vm_pasid_insert(adev, vm, pasid, NULL);
>> +    if (r)
>> +        goto unreserve_bo;
>>         /* Check if PD needs to be reinitialized and do it before
>>        * changing any other state, in case it fails.
>> @@ -3089,35 +3109,23 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>       vm->is_compute_context = true;
>>         if (vm->pasid) {
>> -        unsigned long flags;
>> -
>> -        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
>> -        idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
>> -        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
>> -
>>           /* Free the original amdgpu allocated pasid
>>            * Will be replaced with kfd allocated pasid
>>            */
>>           amdgpu_pasid_free(vm->pasid);
>> -        vm->pasid = 0;
>> +        amdgpu_vm_pasid_remove(adev, vm->pasid, &vm->pasid);
>>       }
>>
>>       /* Free the shadow bo for compute VM */
>>       amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
>> -
>>       if (pasid)
>>           vm->pasid = pasid;
>>         goto unreserve_bo;
>>     free_idr:
>> -    if (pasid) {
>> -        unsigned long flags;
>> +    amdgpu_vm_pasid_remove(adev, pasid, NULL);
>>
>> -        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
>> -        idr_remove(&adev->vm_manager.pasid_idr, pasid);
>> -        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
>> -    }
>>   unreserve_bo:
>>       amdgpu_bo_unreserve(vm->root.bo);
>>       return r;
>> @@ -3133,14 +3141,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>    */
>>   void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>   {
>> -    if (vm->pasid) {
>> -        unsigned long flags;
>> -
>> -        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
>> -        idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
>> -        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
>> -    }
>> -    vm->pasid = 0;
>> +    amdgpu_vm_pasid_remove(adev, vm->pasid, &vm->pasid);
>>       vm->is_compute_context = false;
>>   }
>>
>> @@ -3164,15 +3165,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>         root = amdgpu_bo_ref(vm->root.bo);
>>       amdgpu_bo_reserve(root, true);
>> -    if (vm->pasid) {
>> -        unsigned long flags;
>> -
>> -        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
>> -        idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
>> -        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
>> -        vm->pasid = 0;
>> -    }
>> -
>> +    amdgpu_vm_pasid_remove(adev, vm->pasid, &vm->pasid);
>>       dma_fence_wait(vm->last_unlocked, false);
>>       dma_fence_put(vm->last_unlocked);
>>
>> @@ -3254,8 +3247,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
>>       adev->vm_manager.vm_update_mode = 0;
>>   #endif
>>
>> -    idr_init(&adev->vm_manager.pasid_idr);
>> -    spin_lock_init(&adev->vm_manager.pasid_lock);
>> +    xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
>>   }
>>     /**
>> @@ -3267,8 +3259,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
>>    */
>>   void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
>>   {
>> -    WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
>> -    idr_destroy(&adev->vm_manager.pasid_idr);
>> +    WARN_ON(!xa_empty(&adev->vm_manager.pasids));
>> +    xa_destroy(&adev->vm_manager.pasids);
>>         amdgpu_vmid_mgr_fini(adev);
>>   }
>> @@ -3337,13 +3329,13 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
>>       struct amdgpu_vm *vm;
>>       unsigned long flags;
>>
>> -    spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
>> +    xa_lock_irqsave(&adev->vm_manager.pasids, flags);
>>
>> -    vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
>> +    vm = xa_load(&adev->vm_manager.pasids, pasid);
>>       if (vm)
>>           *task_info = vm->task_info;
>>
>> -    spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
>> +    xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
>>   }
>>     /**
>> @@ -3385,15 +3377,15 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
>>       struct amdgpu_vm *vm;
>>       int r;
>>
>> -    spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
>> -    vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
>> +    xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
>> +    vm = xa_load(&adev->vm_manager.pasids, pasid);
>>       if (vm) {
>>           root = amdgpu_bo_ref(vm->root.bo);
>>           is_compute_context = vm->is_compute_context;
>>       } else {
>>           root = NULL;
>>       }
>> -    spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
>> +    xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
>>         if (!root)
>>           return false;
>> @@ -3411,11 +3403,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
>>           goto error_unref;
>>         /* Double check that the VM still exists */
>> -    spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
>> -    vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
>> +    xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
>> +    vm = xa_load(&adev->vm_manager.pasids, pasid);
>>       if (vm && vm->root.bo != root)
>>           vm = NULL;
>> -    spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
>> +    xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
>>       if (!vm)
>>           goto error_unlock;
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> index ddb85a85cbba..31c467764162 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> @@ -359,8 +359,7 @@ struct amdgpu_vm_manager {
>>       /* PASID to VM mapping, will be used in interrupt context to
>>        * look up VM of a page fault
>>        */
>> -    struct idr                pasid_idr;
>> -    spinlock_t                pasid_lock;
>> +    struct xarray                pasids;
>>   };
>>     struct amdgpu_bo_va_mapping;
>

