[PATCH 4/5] drm/amdgpu: implement grab dedicated vmid
Christian König
deathsimple at vodafone.de
Fri Apr 21 17:09:11 UTC 2017
On 21.04.2017 at 12:05, Chunming Zhou wrote:
> Change-Id: I64da2701c9fdcf986afb90ba1492a78d5bef1b6c
> Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 60 ++++++++++++++++++++++++++++++++++
> 1 file changed, 60 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 21cca99..5764a14 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -408,6 +408,62 @@ bool amdgpu_vm_dedicated_vmid_ready(struct amdgpu_vm *vm)
> return true;
> }
>
> +int amdgpu_vm_grab_dedicated_vmid(struct amdgpu_vm *vm,
> + struct amdgpu_ring *ring,
> + struct amdgpu_sync *sync,
> + struct fence *fence,
> + struct amdgpu_job *job)
> +{
> + struct amdgpu_device *adev = ring->adev;
> + unsigned vmhub = ring->funcs->vmhub;
> + struct amdgpu_vm_id *id = vm->dedicated_vmid[vmhub];
> + struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> + struct fence *updates = sync->last_vm_update;
> + int r = 0;
> + struct fence *flushed, *tmp;
> + bool needs_flush = false;
> +
> + mutex_lock(&id_mgr->lock);
> + tmp = amdgpu_sync_get_fence(&id->active);
> + if (tmp) {
> + r = amdgpu_sync_fence(adev, sync, tmp);
> + fence_put(tmp);
> + mutex_unlock(&id_mgr->lock);
> + return r;
> + }
Waiting here for the other users of the VMID only needs to be done when we
actually flush the VMID; otherwise it is unnecessary and really kills performance.
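As a rough, untested sketch of what I mean (only illustrative, reusing the
helpers and fields from the patch above): decide needs_flush first and only
then synchronize against the previous users of the dedicated VMID:

	mutex_lock(&id_mgr->lock);

	if (amdgpu_vm_had_gpu_reset(adev, id))
		needs_flush = true;

	flushed = id->flushed_updates;
	if (updates && (!flushed || updates->context != flushed->context ||
			fence_is_later(updates, flushed)))
		needs_flush = true;

	if (needs_flush) {
		/* Only when we have to flush do we need to wait for all
		 * previous users of the VMID to finish first.
		 */
		tmp = amdgpu_sync_get_fence(&id->active);
		if (tmp) {
			r = amdgpu_sync_fence(adev, sync, tmp);
			fence_put(tmp);
			mutex_unlock(&id_mgr->lock);
			return r;
		}
	}

	/* Good we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto out;

That way the common case (no flush needed) doesn't pay for waiting on every
other job that used the dedicated VMID before.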
Christian.
> +
> + if (amdgpu_vm_had_gpu_reset(adev, id))
> + needs_flush = true;
> +
> + flushed = id->flushed_updates;
> + if (updates && (!flushed || updates->context != flushed->context ||
> + fence_is_later(updates, flushed)))
> + needs_flush = true;
> +
> + /* Good we can use this VMID. Remember this submission as
> + * user of the VMID.
> + */
> + r = amdgpu_sync_fence(ring->adev, &id->active, fence);
> + if (r)
> + goto out;
> +
> + if (updates && (!flushed || updates->context != flushed->context ||
> + fence_is_later(updates, flushed))) {
> + fence_put(id->flushed_updates);
> + id->flushed_updates = fence_get(updates);
> + }
> + id->pd_gpu_addr = job->vm_pd_addr;
> + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
> + atomic64_set(&id->owner, vm->client_id);
> + job->vm_needs_flush = needs_flush;
> +
> + job->vm_id = id - id_mgr->ids;
> + trace_amdgpu_vm_grab_id(vm, ring, job);
> +out:
> + mutex_unlock(&id_mgr->lock);
> + return r;
> +}
> +
> /**
> * amdgpu_vm_grab_id - allocate the next free VMID
> *
> @@ -432,6 +488,10 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> unsigned i;
> int r = 0;
>
> + if (amdgpu_vm_dedicated_vmid_ready(vm))
> + return amdgpu_vm_grab_dedicated_vmid(vm, ring, sync,
> + fence, job);
> +
> fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
> if (!fences)
> return -ENOMEM;