[PATCH 2/2] drm/amdgpu: stop disabling irqs when it isn't necessary
Christian König
deathsimple at vodafone.de
Mon Jun 13 14:12:43 UTC 2016
From: Christian König <christian.koenig at amd.com>
A regular spin_lock/unlock should do here as well: job_list_lock is only
ever taken from process context, never from interrupt context, so there is
no need to disable interrupts around it.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
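
(Context, not part of the patch: a minimal sketch of when the _irqsave
variants are actually needed; the lock and function names below are
illustrative, not from the driver. The _irqsave variants save and restore
the caller's interrupt state so that a lock also taken from hard-IRQ
context cannot deadlock against an interrupt arriving on the same CPU.
Since job_list_lock is only taken from work handlers, the plain variants
suffice. Note also that the code already drops the lock around
cancel_delayed_work_sync(), which can sleep and so must never be called
under a spinlock.)

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	/* Only ever called from process context (e.g. a work handler);
	 * no IRQ handler takes example_lock, so the plain variant is
	 * sufficient. */
	static void process_context_only(void)
	{
		spin_lock(&example_lock);
		/* ... touch state shared between work handlers ... */
		spin_unlock(&example_lock);
	}

	/* If an interrupt handler could also take the lock, the
	 * process-context side would have to disable interrupts to
	 * avoid self-deadlock while the lock is held: */
	static void shared_with_irq_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* ... state also touched from an IRQ handler ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}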
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index b1d49c5..e13b140 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -331,17 +331,16 @@ static void amd_sched_job_finish(struct work_struct *work)
struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
/* remove job from ring_mirror_list */
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
struct amd_sched_job *next;
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
cancel_delayed_work_sync(&s_job->work_tdr);
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
@@ -350,7 +349,7 @@ static void amd_sched_job_finish(struct work_struct *work)
if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
sched->ops->free_job(s_job);
}
@@ -364,15 +363,14 @@ static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node) == s_job)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
static void amd_sched_job_timedout(struct work_struct *work)
--
2.5.0