[PATCH 2/2] drm/sched: fix timeout handling
Christian König
ckoenig.leichtzumerken at gmail.com
Mon Oct 8 11:36:17 UTC 2018
We need to make sure that the timeout handler doesn't race with job
completion: otherwise ->timedout_job() can be called for a job whose
hardware fence has already signaled.
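
For context, the fix builds on the dma-fence callback contract:
dma_fence_add_callback() returns -ENOENT (leaving cb->node empty) when
the fence has already signaled, and dma_fence_remove_callback() returns
true only if the callback was removed before the fence signaled. A
minimal illustrative sketch of that contract (my_cb, arm_or_complete
and try_disarm are made-up names, not part of the patch):

#include <linux/dma-fence.h>

static void my_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	/* Runs exactly once when f signals. */
}

static void arm_or_complete(struct dma_fence *f, struct dma_fence_cb *cb)
{
	/* -ENOENT means f already signaled and the callback was not
	 * installed, so completion must be handled by the caller.
	 */
	if (dma_fence_add_callback(f, cb, my_cb))
		my_cb(f, cb);
}

static bool try_disarm(struct dma_fence *f, struct dma_fence_cb *cb)
{
	/* True only if the callback was removed before f signaled,
	 * i.e. my_cb() has not run and will not run for this arming.
	 */
	return dma_fence_remove_callback(f, cb);
}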
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/gpu/drm/scheduler/sched_main.c | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bd7d11c47202..ad3c57c9fd21 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -248,14 +248,40 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
+	struct drm_sched_fence *fence;
 	struct drm_sched_job *job;
+	int r;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
+		fence = job->s_fence;
+		if (!dma_fence_remove_callback(fence->parent, &fence->cb))
+			goto already_signaled;
+	}
+
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
+	spin_unlock(&sched->job_list_lock);
 
 	if (job)
-		job->sched->ops->timedout_job(job);
+		sched->ops->timedout_job(job);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry(job, &sched->ring_mirror_list, node) {
+		fence = job->s_fence;
+		if (!fence->parent || !list_empty(&fence->cb.node))
+			continue;
+
+		r = dma_fence_add_callback(fence->parent, &fence->cb,
+					   drm_sched_process_job);
+		if (r)
+already_signaled:
+			drm_sched_process_job(fence->parent, &fence->cb);
+
+	}
+	spin_unlock(&sched->job_list_lock);
 }
 
 /**
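
A note on the control flow: the goto already_signaled jumps from the
first loop into the body of the second one, with job_list_lock still
held throughout. Per C's for-loop semantics, the completion handler
runs for the job whose callback could not be removed, and iteration
then continues forward through the jobs after it, re-arming the
callbacks the reverse scan had already removed; the ->timedout_job()
call is skipped entirely, since the signaled fence means no real
timeout happened. A structured sketch of that path
(handle_already_signaled is a hypothetical helper, not in the patch;
the caller holds sched->job_list_lock and passes the job whose
dma_fence_remove_callback() returned false):

static void handle_already_signaled(struct drm_gpu_scheduler *sched,
				    struct drm_sched_job *job)
{
	struct drm_sched_fence *fence = job->s_fence;

	/* The fence signaled before it could be disarmed, so this is
	 * a completion, not a timeout.
	 */
	drm_sched_process_job(fence->parent, &fence->cb);

	/* Continue forward from the next job, re-arming callbacks,
	 * exactly as the tail of the second loop does.
	 */
	list_for_each_entry_continue(job, &sched->ring_mirror_list, node) {
		fence = job->s_fence;
		if (!fence->parent || !list_empty(&fence->cb.node))
			continue;

		if (dma_fence_add_callback(fence->parent, &fence->cb,
					   drm_sched_process_job))
			/* Signaled in the meantime: complete it too. */
			drm_sched_process_job(fence->parent, &fence->cb);
	}
}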
--
2.14.1