[PATCH 2/8] drm/amdgpu: split finding idle VMID into separate function

Chunming Zhou zhoucm1 at amd.com
Thu Feb 1 05:40:39 UTC 2018


Reviewed-by: Chunming Zhou <david1.zhou at amd.com>


On 2018年01月31日 23:47, Christian König wrote:
> No functional change, but makes it easier to maintain the code.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 116 +++++++++++++++++++-------------
>   1 file changed, 69 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> index 7a3d0de7425d..fbe958f7cb5b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
> @@ -182,6 +182,72 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
>   		atomic_read(&adev->gpu_reset_counter);
>   }
>   
> +/**
> + * amdgpu_vmid_grab_idle - grab idle VMID
> + *
> + * @vm: vm to allocate id for
> + * @ring: ring we want to submit job to
> + * @sync: sync object where we add dependencies
> + * @idle: resulting idle VMID
> + *
> + * Try to find an idle VMID; if none is idle, add a fence to the sync
> + * object to wait on. Returns -ENOMEM when we are out of memory.
> + */
> +static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
> +				 struct amdgpu_ring *ring,
> +				 struct amdgpu_sync *sync,
> +				 struct amdgpu_vmid **idle)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +	unsigned vmhub = ring->funcs->vmhub;
> +	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> +	struct dma_fence **fences;
> +	unsigned i;
> +	int r;
> +
> +	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
> +	if (!fences)
> +		return -ENOMEM;
> +
> +	/* Check if we have an idle VMID */
> +	i = 0;
> +	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
> +		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
> +		if (!fences[i])
> +			break;
> +		++i;
> +	}
> +
> +	/* If we can't find an idle VMID to use, wait till one becomes available */
> +	if (&(*idle)->list == &id_mgr->ids_lru) {
> +		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
> +		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
> +		struct dma_fence_array *array;
> +		unsigned j;
> +
> +		*idle = NULL;
> +		for (j = 0; j < i; ++j)
> +			dma_fence_get(fences[j]);
> +
> +		array = dma_fence_array_create(i, fences, fence_context,
> +					       seqno, true);
> +		if (!array) {
> +			for (j = 0; j < i; ++j)
> +				dma_fence_put(fences[j]);
> +			kfree(fences);
> +			return -ENOMEM;
> +		}
> +
> +		r = amdgpu_sync_fence(adev, sync, &array->base, false);
> +		dma_fence_put(&array->base);
> +		return r;
> +
> +	}
> +	kfree(fences);
> +
> +	return 0;
> +}
> +
>   /* idr_mgr->lock must be held */
>   static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
>   					    struct amdgpu_ring *ring,
> @@ -263,56 +329,12 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
>   	uint64_t fence_context = adev->fence_context + ring->idx;
>   	struct dma_fence *updates = sync->last_vm_update;
>   	struct amdgpu_vmid *id, *idle;
> -	struct dma_fence **fences;
> -	unsigned i;
>   	int r = 0;
>   
>   	mutex_lock(&id_mgr->lock);
> -	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
> -	if (!fences) {
> -		mutex_unlock(&id_mgr->lock);
> -		return -ENOMEM;
> -	}
> -	/* Check if we have an idle VMID */
> -	i = 0;
> -	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
> -		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
> -		if (!fences[i])
> -			break;
> -		++i;
> -	}
> -
> -	/* If we can't find a idle VMID to use, wait till one becomes available */
> -	if (&idle->list == &id_mgr->ids_lru) {
> -		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
> -		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
> -		struct dma_fence_array *array;
> -		unsigned j;
> -
> -		for (j = 0; j < i; ++j)
> -			dma_fence_get(fences[j]);
> -
> -		array = dma_fence_array_create(i, fences, fence_context,
> -					   seqno, true);
> -		if (!array) {
> -			for (j = 0; j < i; ++j)
> -				dma_fence_put(fences[j]);
> -			kfree(fences);
> -			r = -ENOMEM;
> -			goto error;
> -		}
> -
> -
> -		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
> -		dma_fence_put(&array->base);
> -		if (r)
> -			goto error;
> -
> -		mutex_unlock(&id_mgr->lock);
> -		return 0;
> -
> -	}
> -	kfree(fences);
> +	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
> +	if (r || !idle)
> +		goto error;
>   
>   	if (vm->reserved_vmid[vmhub]) {
>   		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,



More information about the amd-gfx mailing list