[PATCH 5/6] drm/amdgpu: implement grab reserved vmid V3
Zhang, Jerry (Junwei)
Jerry.Zhang at amd.com
Thu Apr 27 05:54:39 UTC 2017
On 04/27/2017 01:00 PM, Chunming Zhou wrote:
> v2: only do the sync wait when a flush is needed
> v3: fix a race condition
>
> Change-Id: I64da2701c9fdcf986afb90ba1492a78d5bef1b6c
> Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72 ++++++++++++++++++++++++++++++++--
> 1 file changed, 68 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index e6fdfa4..7752279 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -397,6 +397,65 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
>  		atomic_read(&adev->gpu_reset_counter);
>  }
>
> +static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
> +{
> +	return !!vm->reserved_vmid[vmhub];
> +}
It may be better to move this check into the alloc reserved_vmid function as
well, to make future maintenance easier.
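For example, something like this (just a sketch; I'm assuming the
amdgpu_vm_alloc_reserved_vmid() helper from earlier in this series, with the
idle-id pick elided):

static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	int r = 0;

	mutex_lock(&id_mgr->lock);
	/* reuse the helper so the "already reserved" check lives in one place */
	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub))
		goto unlock;
	/* ... take an idle id from id_mgr->ids_lru as in the earlier patch ... */
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}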
> +
> +/* id_mgr->lock must be held */
> +static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
> +					       struct amdgpu_ring *ring,
> +					       struct amdgpu_sync *sync,
> +					       struct dma_fence *fence,
> +					       struct amdgpu_job *job)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +	unsigned vmhub = ring->funcs->vmhub;
> +	struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
> +	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> +	struct dma_fence *updates = sync->last_vm_update;
> +	int r = 0;
> +	struct dma_fence *flushed, *tmp;
> +	bool needs_flush = false;
> +
> +	flushed = id->flushed_updates;
> +	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
> +	    (atomic64_read(&id->owner) != vm->client_id) ||
> +	    (job->vm_pd_addr != id->pd_gpu_addr) ||
> +	    (updates && (!flushed || updates->context != flushed->context ||
> +			 dma_fence_is_later(updates, flushed)))) {
> +		needs_flush = true;
> +		tmp = amdgpu_sync_get_fence(&id->active);
> +		if (tmp) {
> +			r = amdgpu_sync_fence(adev, sync, tmp);
> +			dma_fence_put(tmp);
> +			return r;
Sorry, I didn't quite follow this part.
Could you give me some tips?
(In this case, do we not need to update the id info below?)
Jerry
> +		}
> +	}
> +
> +	/* Good we can use this VMID. Remember this submission as
> +	 * user of the VMID.
> +	 */
> +	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
> +	if (r)
> +		goto out;
> +
> +	if (updates && (!flushed || updates->context != flushed->context ||
> +			dma_fence_is_later(updates, flushed))) {
> +		dma_fence_put(id->flushed_updates);
> +		id->flushed_updates = dma_fence_get(updates);
> +	}
> +	id->pd_gpu_addr = job->vm_pd_addr;
> +	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
> +	atomic64_set(&id->owner, vm->client_id);
> +	job->vm_needs_flush = needs_flush;
> +
> +	job->vm_id = id - id_mgr->ids;
> +	trace_amdgpu_vm_grab_id(vm, ring, job);
> +out:
> +	return r;
> +}
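Another small nit: the (updates && (!flushed || ...)) condition appears twice
in this function. Maybe worth a tiny helper so the two call sites can't drift
apart, e.g. (just a sketch, the helper name is mine):

static bool amdgpu_vm_id_needs_update(struct dma_fence *updates,
				      struct dma_fence *flushed)
{
	/* are there PD/PT updates newer than what this id last flushed? */
	return updates && (!flushed ||
			   updates->context != flushed->context ||
			   dma_fence_is_later(updates, flushed));
}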
> +
>  /**
>   * amdgpu_vm_grab_id - allocate the next free VMID
>   *
> @@ -421,12 +480,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>  	unsigned i;
>  	int r = 0;
>
> +	mutex_lock(&id_mgr->lock);
> +	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
> +		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
> +		mutex_unlock(&id_mgr->lock);
> +		return r;
> +	}
>  	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
> -	if (!fences)
> +	if (!fences) {
> +		mutex_unlock(&id_mgr->lock);
>  		return -ENOMEM;
> -
> -	mutex_lock(&id_mgr->lock);
> -
> +	}
>  	/* Check if we have an idle VMID */
>  	i = 0;
>  	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
>
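One more nit on the locking: kmalloc_array() is now called while holding
id_mgr->lock. That is legal (a mutex may sleep, and GFP_KERNEL is fine here),
but allocating before taking the lock would keep the critical section
shorter, roughly like this (sketch only; note the extra kfree() needed on the
reserved path):

	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	mutex_lock(&id_mgr->lock);
	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
		mutex_unlock(&id_mgr->lock);
		kfree(fences);
		return r;
	}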