[PATCH 7/8] amdgpu/cs: split out fence dependency checking
Christian König
deathsimple at vodafone.de
Tue Apr 4 07:37:26 UTC 2017
On 04.04.2017 at 06:27, Dave Airlie wrote:
> From: Dave Airlie <airlied at redhat.com>
>
> This just splits out the fence dependency checking into its
> own function to make it easier to add semaphore dependencies.
>
> Signed-off-by: Dave Airlie <airlied at redhat.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 86 +++++++++++++++++++---------------
> 1 file changed, 48 insertions(+), 38 deletions(-)
>
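For context on the motivation above: once the per-chunk handling lives in its
own helper, a new dependency chunk type only needs one more dispatch case in
amdgpu_cs_dependencies(). A rough sketch of how a semaphore chunk could slot
in later follows; the chunk id and helper name (AMDGPU_CHUNK_ID_SEM_WAIT,
amdgpu_process_sem_dep) are placeholders here, not part of this patch.

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                  struct amdgpu_cs_parser *p)
{
        int i, r;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk = &p->chunks[i];

                switch (chunk->chunk_id) {
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                        r = amdgpu_process_fence_dep(adev, p, chunk);
                        break;
                case AMDGPU_CHUNK_ID_SEM_WAIT:  /* placeholder id */
                        r = amdgpu_process_sem_dep(adev, p, chunk);
                        break;
                default:
                        continue;
                }
                if (r)
                        return r;
        }

        return 0;
}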
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> index 99424cb..4671432 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> @@ -963,56 +963,66 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
> return 0;
> }
>
> -static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
> - struct amdgpu_cs_parser *p)
> +static int amdgpu_process_fence_dep(struct amdgpu_device *adev,
> + struct amdgpu_cs_parser *p,
> + struct amdgpu_cs_chunk *chunk)
adev is actually also available as p->adev.
But the old code got this wrong as well, so either way the patch is
Reviewed-by: Christian König <christian.koenig at amd.com>.
Regards,
Christian.
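To spell out the p->adev point with a rough sketch (not something this patch
needs to change; the body is abbreviated and otherwise identical to the hunk
below):

static int amdgpu_process_fence_dep(struct amdgpu_cs_parser *p,
                                    struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
        unsigned num_deps = chunk->length_dw * 4 / sizeof(*deps);
        int i, r;

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_ring *ring;

                /* p->adev stands in for the dropped adev parameter */
                r = amdgpu_cs_get_ring(p->adev, deps[i].ip_type,
                                       deps[i].ip_instance,
                                       deps[i].ring, &ring);
                if (r)
                        return r;

                /* ... amdgpu_ctx_get()/fence lookup and
                 * amdgpu_sync_fence(p->adev, ...) as in the hunk below ... */
        }

        return 0;
}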
> {
> struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
> - int i, j, r;
> -
> - for (i = 0; i < p->nchunks; ++i) {
> - struct drm_amdgpu_cs_chunk_dep *deps;
> - struct amdgpu_cs_chunk *chunk;
> - unsigned num_deps;
> + unsigned num_deps;
> + int i, r;
> + struct drm_amdgpu_cs_chunk_dep *deps;
>
> - chunk = &p->chunks[i];
> + deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
> + num_deps = chunk->length_dw * 4 /
> + sizeof(struct drm_amdgpu_cs_chunk_dep);
>
> - if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
> - continue;
> + for (i = 0; i < num_deps; ++i) {
> + struct amdgpu_ring *ring;
> + struct amdgpu_ctx *ctx;
> + struct dma_fence *fence;
>
> - deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
> - num_deps = chunk->length_dw * 4 /
> - sizeof(struct drm_amdgpu_cs_chunk_dep);
> + r = amdgpu_cs_get_ring(adev, deps[i].ip_type,
> + deps[i].ip_instance,
> + deps[i].ring, &ring);
> + if (r)
> + return r;
>
> - for (j = 0; j < num_deps; ++j) {
> - struct amdgpu_ring *ring;
> - struct amdgpu_ctx *ctx;
> - struct dma_fence *fence;
> + ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
> + if (ctx == NULL)
> + return -EINVAL;
>
> - r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
> - deps[j].ip_instance,
> - deps[j].ring, &ring);
> + fence = amdgpu_ctx_get_fence(ctx, ring,
> + deps[i].handle);
> + if (IS_ERR(fence)) {
> + r = PTR_ERR(fence);
> + amdgpu_ctx_put(ctx);
> + return r;
> + } else if (fence) {
> + r = amdgpu_sync_fence(adev, &p->job->sync,
> + fence);
> + dma_fence_put(fence);
> + amdgpu_ctx_put(ctx);
> if (r)
> return r;
> + }
> + }
> + return 0;
> +}
>
> - ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
> - if (ctx == NULL)
> - return -EINVAL;
> +static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
> + struct amdgpu_cs_parser *p)
> +{
> + int i, r;
>
> - fence = amdgpu_ctx_get_fence(ctx, ring,
> - deps[j].handle);
> - if (IS_ERR(fence)) {
> - r = PTR_ERR(fence);
> - amdgpu_ctx_put(ctx);
> - return r;
> + for (i = 0; i < p->nchunks; ++i) {
> + struct amdgpu_cs_chunk *chunk;
>
> - } else if (fence) {
> - r = amdgpu_sync_fence(adev, &p->job->sync,
> - fence);
> - dma_fence_put(fence);
> - amdgpu_ctx_put(ctx);
> - if (r)
> - return r;
> - }
> + chunk = &p->chunks[i];
> +
> + if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
> + r = amdgpu_process_fence_dep(adev, p, chunk);
> + if (r)
> + return r;
> }
> }
>