[PATCH 19/22] drm/amdgpu: make amdgpu_job refcounted
Andres Rodriguez
andresx7 at gmail.com
Thu Mar 2 07:03:21 UTC 2017
The job structure is shared between multiple components, the gpu_scheduler
and amdgpu. Make each user hold its own reference to simplify resource
freeing.
This will also be useful for adding extra callbacks on job completion.
Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
---
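A rough sketch of the resulting ownership model, for reviewers skimming the
diff. The toy_* names below are simplified, illustrative stand-ins for
amdgpu_job and its helpers, not the actual driver code; the real flow is in
amdgpu_job.c further down.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct toy_job {
	struct kref refcount;
	/* ... ibs, fences, sync objects ... */
};

/* release callback, invoked when the last reference is dropped */
static void toy_job_free(struct kref *ref)
{
	struct toy_job *job = container_of(ref, struct toy_job, refcount);

	/* drop fences, free sync objects, then the job itself */
	kfree(job);
}

static struct toy_job *toy_job_get(struct toy_job *job)
{
	kref_get(&job->refcount);
	return job;
}

static void toy_job_put(struct toy_job **job)
{
	kref_put(&(*job)->refcount, toy_job_free);
	*job = NULL;	/* callers must not touch the job afterwards */
}

static int toy_submit(void)
{
	struct toy_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return -ENOMEM;
	kref_init(&job->refcount);	/* the submitter's own reference */

	toy_job_get(job);	/* extra reference handed to the scheduler;
				 * its free callback drops it via toy_job_put() */
	/* ... amd_sched_entity_push_job(...) ... */

	toy_job_put(&job);	/* submitter is done with the job */
	return 0;
}

With this, neither side has to know when the other is finished with the job;
each simply puts its own reference and the last put frees the resources.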
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 +++-
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++---
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 24 ++++++++++++++++++++----
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 29 ++++++++++++++---------------
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 ++----
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 18 +++++++-----------
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 23 ++++++++++-------------
7 files changed, 59 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0676495..accb885 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -649,7 +649,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
+struct amdgpu_job *amdgpu_job_get(struct amdgpu_job *job);
+void amdgpu_job_put(struct amdgpu_job **job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct dma_fence **f);
@@ -991,6 +992,7 @@ struct amdgpu_cs_parser {
struct amdgpu_job {
struct amd_sched_job base;
+ struct kref refcount;
struct amdgpu_device *adev;
struct amdgpu_vm *vm;
struct amdgpu_ring *ring;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 19ce202..5e8431d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -714,7 +714,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
if (parser->job)
- amdgpu_job_free(parser->job);
+ amdgpu_job_put(&parser->job);
amdgpu_bo_unref(&parser->uf_entry.robj);
}
@@ -988,11 +988,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
int r;
job = p->job;
- p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
- amdgpu_job_free(job);
+ amdgpu_job_put(&job);
return r;
}
@@ -1004,6 +1003,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
+ amdgpu_job_get(job);
amd_sched_entity_push_job(&job->base);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index b445996..ca78ba5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -53,6 +53,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, int priority,
if (!*job)
return -ENOMEM;
+ kref_init(&(*job)->refcount);
(*job)->adev = adev;
(*job)->vm = vm;
(*job)->priority = priority;
@@ -96,13 +97,14 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- dma_fence_put(job->fence);
- amdgpu_sync_free(&job->sync);
- kfree(job);
+ amdgpu_job_put(&job);
}
-void amdgpu_job_free(struct amdgpu_job *job)
+static void amdgpu_job_free(struct kref *ref)
{
+ struct amdgpu_job *job;
+ job = container_of(ref, struct amdgpu_job, refcount);
+
amdgpu_job_free_resources(job);
dma_fence_put(job->fence);
@@ -110,6 +112,18 @@ void amdgpu_job_free(struct amdgpu_job *job)
kfree(job);
}
+struct amdgpu_job *amdgpu_job_get(struct amdgpu_job *job)
+{
+ kref_get(&job->refcount);
+ return job;
+}
+
+void amdgpu_job_put(struct amdgpu_job **job)
+{
+ kref_put(&(*job)->refcount, amdgpu_job_free);
+ (*job) = NULL;
+}
+
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct dma_fence **f)
@@ -128,6 +142,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
+
+ amdgpu_job_get(job);
amd_sched_entity_push_job(&job->base);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1154b0a..48e1f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1277,7 +1277,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
- goto error_free;
+ goto out_unref;
}
}
@@ -1298,20 +1298,21 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
NULL, fence);
job->fence = dma_fence_get(*fence);
- if (r)
+ if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r);
- amdgpu_job_free(job);
+ goto out_unref;
+ }
} else {
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
+ if (r) {
+ DRM_ERROR("Error submitting job (%d)\n", r);
+ goto out_unref;
+ }
}
- return r;
-
-error_free:
- amdgpu_job_free(job);
+out_unref:
+ amdgpu_job_put(&job);
return r;
}
@@ -1360,7 +1361,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
- goto error_free;
+ goto out_unref;
}
}
@@ -1395,12 +1396,10 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
- goto error_free;
-
- return 0;
+ goto out_unref;
-error_free:
- amdgpu_job_free(job);
+out_unref:
+ amdgpu_job_put(&job);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6d6ab7f..3f307e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -980,8 +980,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
job->fence = dma_fence_get(f);
if (r)
goto err_free;
-
- amdgpu_job_free(job);
} else {
r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
@@ -995,12 +993,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
*fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
dma_fence_put(f);
+ amdgpu_job_put(&job);
return 0;
err_free:
- amdgpu_job_free(job);
-
+ amdgpu_job_put(&job);
err:
ttm_eu_backoff_reservation(&ticket, &head);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index e2c0678..50ed4b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -467,16 +467,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f);
if (r)
- goto err;
+ goto out_unref;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
- return 0;
-err:
- amdgpu_job_free(job);
+out_unref:
+ amdgpu_job_put(&job);
return r;
}
@@ -530,23 +528,21 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f);
if (r)
- goto err;
+ goto out_unref;
- amdgpu_job_free(job);
} else {
r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
- goto err;
+ goto out_unref;
}
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
- return 0;
-err:
- amdgpu_job_free(job);
+out_unref:
+ amdgpu_job_put(&job);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bd0d331..95f1664 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -668,7 +668,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
+ amdgpu_job_put(&job);
return 0;
}
@@ -683,17 +683,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
- goto error_free;
+ goto out_unref;
amdgpu_bo_fence(vm->page_directory, fence, true);
dma_fence_put(vm->page_directory_fence);
vm->page_directory_fence = dma_fence_get(fence);
dma_fence_put(fence);
- return 0;
-
-error_free:
- amdgpu_job_free(job);
+out_unref:
+ amdgpu_job_put(&job);
return r;
}
@@ -965,16 +963,16 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = amdgpu_sync_fence(adev, &job->sync, exclusive);
if (r)
- goto error_free;
+ goto out_unref;
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
if (r)
- goto error_free;
+ goto out_unref;
r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
if (r)
- goto error_free;
+ goto out_unref;
params.shadow = true;
amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
@@ -986,7 +984,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &f);
if (r)
- goto error_free;
+ goto out_unref;
amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
@@ -994,10 +992,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
*fence = dma_fence_get(f);
}
dma_fence_put(f);
- return 0;
-error_free:
- amdgpu_job_free(job);
+out_unref:
+ amdgpu_job_put(&job);
return r;
}
--
2.9.3