[PATCH 4/5] amdgpu/cs: split out fence dependency checking
Dave Airlie
airlied at gmail.com
Wed May 24 07:06:14 UTC 2017
From: Dave Airlie <airlied at redhat.com>
This just splits out the fence dependency checking into its
own function to make it easier to add semaphore dependencies.
Reviewed-by: Christian König <christian.koenig at amd.com>
Signed-off-by: Dave Airlie <airlied at redhat.com>
---
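(Not part of this patch, just to illustrate why the split helps: with the
per-chunk handling factored out into amdgpu_cs_process_fence_dep(), a
follow-up only needs to add another branch to the dispatch loop in
amdgpu_cs_dependencies(). The chunk id and helper name below are made-up
placeholders for illustration, not taken from the follow-up patch:

	if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
		r = amdgpu_cs_process_fence_dep(p, chunk);
		if (r)
			return r;
	} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SEM_WAIT) {	/* placeholder */
		r = amdgpu_cs_process_sem_dep(p, chunk);		/* placeholder */
		if (r)
			return r;
	}
)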
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 85 +++++++++++++++++++---------------
1 file changed, 47 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e6b950..30225d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -995,56 +995,65 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
-				  struct amdgpu_cs_parser *p)
+static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+				       struct amdgpu_cs_chunk *chunk)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	int i, j, r;
-
-	for (i = 0; i < p->nchunks; ++i) {
-		struct drm_amdgpu_cs_chunk_dep *deps;
-		struct amdgpu_cs_chunk *chunk;
-		unsigned num_deps;
+	unsigned num_deps;
+	int i, r;
+	struct drm_amdgpu_cs_chunk_dep *deps;
 
-		chunk = &p->chunks[i];
+	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_dep);
 
-		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
-			continue;
+	for (i = 0; i < num_deps; ++i) {
+		struct amdgpu_ring *ring;
+		struct amdgpu_ctx *ctx;
+		struct dma_fence *fence;
 
-		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
-		num_deps = chunk->length_dw * 4 /
-			sizeof(struct drm_amdgpu_cs_chunk_dep);
+		r = amdgpu_cs_get_ring(p->adev, deps[i].ip_type,
+				       deps[i].ip_instance,
+				       deps[i].ring, &ring);
+		if (r)
+			return r;
 
-		for (j = 0; j < num_deps; ++j) {
-			struct amdgpu_ring *ring;
-			struct amdgpu_ctx *ctx;
-			struct dma_fence *fence;
+		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+		if (ctx == NULL)
+			return -EINVAL;
 
-			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
-					       deps[j].ip_instance,
-					       deps[j].ring, &ring);
+		fence = amdgpu_ctx_get_fence(ctx, ring,
+					     deps[i].handle);
+		if (IS_ERR(fence)) {
+			r = PTR_ERR(fence);
+			amdgpu_ctx_put(ctx);
+			return r;
+		} else if (fence) {
+			r = amdgpu_sync_fence(p->adev, &p->job->sync,
+					      fence);
+			dma_fence_put(fence);
+			amdgpu_ctx_put(ctx);
 			if (r)
 				return r;
+		}
+	}
+	return 0;
+}
 
-			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
-			if (ctx == NULL)
-				return -EINVAL;
+static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+				  struct amdgpu_cs_parser *p)
+{
+	int i, r;
 
-			fence = amdgpu_ctx_get_fence(ctx, ring,
-						     deps[j].handle);
-			if (IS_ERR(fence)) {
-				r = PTR_ERR(fence);
-				amdgpu_ctx_put(ctx);
-				return r;
+	for (i = 0; i < p->nchunks; ++i) {
+		struct amdgpu_cs_chunk *chunk;
 
-			} else if (fence) {
-				r = amdgpu_sync_fence(adev, &p->job->sync,
-						      fence);
-				dma_fence_put(fence);
-				amdgpu_ctx_put(ctx);
-				if (r)
-					return r;
-			}
+		chunk = &p->chunks[i];
+
+		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
+			r = amdgpu_cs_process_fence_dep(p, chunk);
+			if (r)
+				return r;
 		}
 	}
 
--
2.9.4