[PATCH 4/5] amdgpu/cs: split out fence dependency checking

zhoucm1 david1.zhou at amd.com
Wed Apr 26 06:53:35 UTC 2017



On 2017-04-26 11:28, Dave Airlie wrote:
> From: Dave Airlie <airlied at redhat.com>
>
> This just splits out the fence dependency checking into its
> own function to make it easier to add semaphore dependencies.
>
> Reviewed-by: Christian König <christian.koenig at amd.com>
> Signed-off-by: Dave Airlie <airlied at redhat.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 85 +++++++++++++++++++---------------
>   1 file changed, 47 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 99424cb..df25b32 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -963,56 +963,65 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
>   	return 0;
>   }
>   
> -static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
> -				  struct amdgpu_cs_parser *p)
> +static int amdgpu_process_fence_dep(struct amdgpu_cs_parser *p,
To be consistent, we'd like the amdgpu_cs prefix in amdgpu_cs.c, the same 
as in the other patches (a possible rename is sketched after the quoted 
diff below).

Regards,
David Zhou
> +				    struct amdgpu_cs_chunk *chunk)
>   {
>   	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
> -	int i, j, r;
> -
> -	for (i = 0; i < p->nchunks; ++i) {
> -		struct drm_amdgpu_cs_chunk_dep *deps;
> -		struct amdgpu_cs_chunk *chunk;
> -		unsigned num_deps;
> +	unsigned num_deps;
> +	int i, r;
> +	struct drm_amdgpu_cs_chunk_dep *deps;
>   
> -		chunk = &p->chunks[i];
> +	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
> +	num_deps = chunk->length_dw * 4 /
> +		sizeof(struct drm_amdgpu_cs_chunk_dep);
>   
> -		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
> -			continue;
> +	for (i = 0; i < num_deps; ++i) {
> +		struct amdgpu_ring *ring;
> +		struct amdgpu_ctx *ctx;
> +		struct dma_fence *fence;
>   
> -		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
> -		num_deps = chunk->length_dw * 4 /
> -			sizeof(struct drm_amdgpu_cs_chunk_dep);
> +		r = amdgpu_cs_get_ring(p->adev, deps[i].ip_type,
> +				       deps[i].ip_instance,
> +				       deps[i].ring, &ring);
> +		if (r)
> +			return r;
>   
> -		for (j = 0; j < num_deps; ++j) {
> -			struct amdgpu_ring *ring;
> -			struct amdgpu_ctx *ctx;
> -			struct dma_fence *fence;
> +		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
> +		if (ctx == NULL)
> +			return -EINVAL;
>   
> -			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
> -					       deps[j].ip_instance,
> -					       deps[j].ring, &ring);
> +		fence = amdgpu_ctx_get_fence(ctx, ring,
> +					     deps[i].handle);
> +		if (IS_ERR(fence)) {
> +			r = PTR_ERR(fence);
> +			amdgpu_ctx_put(ctx);
> +			return r;
> +		} else if (fence) {
> +			r = amdgpu_sync_fence(p->adev, &p->job->sync,
> +					      fence);
> +			dma_fence_put(fence);
> +			amdgpu_ctx_put(ctx);
>   			if (r)
>   				return r;
> +		}
> +	}
> +	return 0;
> +}
>   
> -			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
> -			if (ctx == NULL)
> -				return -EINVAL;
> +static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
> +				  struct amdgpu_cs_parser *p)
> +{
> +	int i, r;
>   
> -			fence = amdgpu_ctx_get_fence(ctx, ring,
> -						     deps[j].handle);
> -			if (IS_ERR(fence)) {
> -				r = PTR_ERR(fence);
> -				amdgpu_ctx_put(ctx);
> -				return r;
> +	for (i = 0; i < p->nchunks; ++i) {
> +		struct amdgpu_cs_chunk *chunk;
>   
> -			} else if (fence) {
> -				r = amdgpu_sync_fence(adev, &p->job->sync,
> -						      fence);
> -				dma_fence_put(fence);
> -				amdgpu_ctx_put(ctx);
> -				if (r)
> -					return r;
> -			}
> +		chunk = &p->chunks[i];
> +
> +		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
> +			r = amdgpu_process_fence_dep(p, chunk);
> +			if (r)
> +				return r;
>   		}
>   	}
>   
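
For reference, the rename requested above would only touch the helper's 
name and its single call site; a hypothetical sketch of the suggestion 
(not committed code) might look like:

    static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
    					   struct amdgpu_cs_chunk *chunk)
    {
    	/* body identical to amdgpu_process_fence_dep() in the patch above */
    }

and in amdgpu_cs_dependencies():

    	if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
    		r = amdgpu_cs_process_fence_dep(p, chunk);
    		if (r)
    			return r;
    	}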


