[PATCH 5/5] drm/amdgpu: drop client_id from VM

Chunming Zhou zhoucm1 at amd.com
Thu Dec 21 08:39:44 UTC 2017


The patch itself is Reviewed-by: Chunming Zhou <david1.zhou at amd.com>

Another question, going the other way: could the PASID be derived
directly from the fence context if we switch to using the fence context
as the client id? Or does the hardware limit the number of bits
available to represent the PASID?
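
For illustration, a minimal standalone sketch (userspace code, not the
kernel implementation) of the wraparound concern with a 16-bit PASID
versus a 64-bit counter such as the fence context or the old client_id:

    /* A 16-bit counter reuses values after 65536 allocations; a 64-bit
     * counter will not wrap in any realistic system lifetime. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ctx64;   /* stand-in for a 64-bit context counter */
    static uint16_t pasid16; /* stand-in for the 16-bit PASID space */

    int main(void)
    {
        for (uint32_t i = 0; i < 70000; i++) {
            ctx64++;
            pasid16++;
        }
        /* pasid16 has wrapped around (70000 % 65536 == 4464), so values
         * are being reused; ctx64 is still far from its maximum. */
        printf("ctx64=%llu pasid16=%u\n",
               (unsigned long long)ctx64, (unsigned)pasid16);
        return 0;
    }

So even if the hardware PASID could be packed into the owner field, its
16-bit range means reuse, which is exactly what the client_id was added
to avoid.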


Regards,
David Zhou

On 2017-12-21 16:15, Christian König wrote:
> The problem is that the PASID would never work correctly: it is only
> 16 bits and so gets reused quite often.
>
> But we introduced the client_id because we needed a unique 64-bit
> identifier which never repeats.
>
> Felix probably didn't know that when he added the comment.
>
> Regards,
> Christian.
>
> On 2017-12-21 04:05, Chunming Zhou wrote:
>> I agree that using fence_context is as efficient as the original
>> client id, since the VM only has one SDMA entity.
>>
>> But the PASID seems more appropriate for the per-VM-per-process
>> concept; the comment said so as well.
>>
>> Regards,
>> David Zhou
>> On 2017-12-20 21:21, Christian König wrote:
>>> Use the fence context from the scheduler entity.
>>>
>>> Signed-off-by: Christian König <christian.koenig at amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 8 ++++----
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 2 --
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  | 4 ----
>>>   3 files changed, 4 insertions(+), 10 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>> index d24884b419cb..16884a0b677b 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
>>> @@ -115,7 +115,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>>>
>>>       flushed  = id->flushed_updates;
>>>       if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
>>> -        (atomic64_read(&id->owner) != vm->client_id) ||
>>> +        (atomic64_read(&id->owner) != vm->entity.fence_context) ||
>>>           (job->vm_pd_addr != id->pd_gpu_addr) ||
>>>         (updates && (!flushed || updates->context != flushed->context ||
>>>               dma_fence_is_later(updates, flushed))) ||
>>> @@ -144,7 +144,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>>>           id->flushed_updates = dma_fence_get(updates);
>>>       }
>>>       id->pd_gpu_addr = job->vm_pd_addr;
>>> -    atomic64_set(&id->owner, vm->client_id);
>>> +    atomic64_set(&id->owner, vm->entity.fence_context);
>>>       job->vm_needs_flush = needs_flush;
>>>       if (needs_flush) {
>>>           dma_fence_put(id->last_flush);
>>> @@ -242,7 +242,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>>>           if (amdgpu_vmid_had_gpu_reset(adev, id))
>>>               continue;
>>>
>>> -        if (atomic64_read(&id->owner) != vm->client_id)
>>> +        if (atomic64_read(&id->owner) != vm->entity.fence_context)
>>>               continue;
>>>
>>>         if (job->vm_pd_addr != id->pd_gpu_addr)
>>> @@ -291,7 +291,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>>>       id->pd_gpu_addr = job->vm_pd_addr;
>>>       dma_fence_put(id->flushed_updates);
>>>       id->flushed_updates = dma_fence_get(updates);
>>> -    atomic64_set(&id->owner, vm->client_id);
>>> +    atomic64_set(&id->owner, vm->entity.fence_context);
>>>
>>> needs_flush:
>>>       job->vm_needs_flush = true;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> index 946bc21c6d7d..af7dceb7131e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>>> @@ -2256,7 +2256,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>>       uint64_t init_pde_value = 0;
>>>
>>>       vm->va = RB_ROOT_CACHED;
>>> -    vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
>>>       for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
>>>           vm->reserved_vmid[i] = NULL;
>>>       spin_lock_init(&vm->status_lock);
>>> @@ -2502,7 +2501,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
>>>           adev->vm_manager.seqno[i] = 0;
>>>
>>>       atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
>>> -    atomic64_set(&adev->vm_manager.client_counter, 0);
>>>       spin_lock_init(&adev->vm_manager.prt_lock);
>>>       atomic_set(&adev->vm_manager.num_prt_users, 0);
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>> index 78296d1a5b2f..21a80f1bb2b9 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>>> @@ -191,8 +191,6 @@ struct amdgpu_vm {
>>>       /* Scheduler entity for page table updates */
>>>       struct drm_sched_entity    entity;
>>>
>>> -    /* client id and PASID (TODO: replace client_id with PASID) */
>>> -    u64                     client_id;
>>>       unsigned int        pasid;
>>>       /* dedicated to vm */
>>>       struct amdgpu_vmid    *reserved_vmid[AMDGPU_MAX_VMHUBS];
>>> @@ -230,8 +228,6 @@ struct amdgpu_vm_manager {
>>>       struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
>>>       unsigned                vm_pte_num_rings;
>>>       atomic_t                vm_pte_next_ring;
>>> -    /* client id counter */
>>> -    atomic64_t                client_counter;
>>>
>>>       /* partial resident texture handling */
>>>       spinlock_t                prt_lock;
>>
>
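
For reference, the reason entity.fence_context can replace client_id
without losing the never-repeats property: the scheduler allocates each
entity's fence context via dma_fence_context_alloc(), which hands out
values from a global 64-bit counter. Paraphrased from
drivers/dma-buf/dma-fence.c of this era:

    /* Global, monotonically increasing 64-bit counter; each caller
     * receives a block of `num` context values that is never handed
     * out again for the lifetime of the system. */
    static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);

    u64 dma_fence_context_alloc(unsigned num)
    {
        WARN_ON(!num);
        return atomic64_add_return(num, &dma_fence_context_counter) - num;
    }

This is the same pattern as the atomic64_inc_return() on
vm_manager.client_counter that the patch removes, and since each VM has
exactly one page-table update entity, its fence context is unique per
VM.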


