[Intel-gfx] [PATCH 4/8] drm/i915/gt: Convert timeline tracking to spinlock
Chris Wilson
chris at chris-wilson.co.uk
Fri Jul 5 07:46:00 UTC 2019
Convert the manipulation of the active timelines list to use a spinlock
so that we can perform the updates from underneath a quick interrupt
callback.
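As a rough standalone sketch of why the spinlock matters (illustrative
names only, not the i915 code): unlike mutex_lock(), spin_lock() never
sleeps, so the list update can safely be made from atomic context; an
interrupt-safe caller would typically reach for the _irqsave variants.

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_timelines {
	spinlock_t lock;		/* protects active_list */
	struct list_head active_list;
};

struct example_timeline {
	struct list_head link;
};

/* Mirrors the enter/exit pattern: add/remove under the spinlock. */
static void example_enter(struct example_timelines *tls,
			  struct example_timeline *tl)
{
	spin_lock(&tls->lock);
	list_add(&tl->link, &tls->active_list);
	spin_unlock(&tls->lock);
}

static void example_exit(struct example_timelines *tls,
			 struct example_timeline *tl)
{
	spin_lock(&tls->lock);
	list_del(&tl->link);
	spin_unlock(&tls->lock);
}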
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gt/intel_gt_types.h | 2 +-
drivers/gpu/drm/i915/gt/intel_reset.c | 13 ++++++++++---
drivers/gpu/drm/i915/gt/intel_timeline.c | 12 +++++-------
drivers/gpu/drm/i915/i915_gem.c | 20 ++++++++++----------
4 files changed, 26 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index c03e56628ee2..cfd41e6c54e1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -26,7 +26,7 @@ struct intel_gt {
struct i915_ggtt *ggtt;
struct intel_gt_timelines {
- struct mutex mutex; /* protects list */
+ spinlock_t lock; /* protects active_list */
struct list_head active_list;
/* Pack multiple timelines' seqnos into the same page */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index adfdb908587f..72002c0f9698 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -858,6 +858,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl;
if (!test_bit(I915_WEDGED, &error->flags))
@@ -878,14 +879,16 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
*
* No more can be submitted until we reset the wedged bit.
*/
- mutex_lock(&i915->gt.timelines.mutex);
- list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
+ spin_lock(&timelines->lock);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
+ spin_unlock(&timelines->lock);
+
/*
* All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting
@@ -895,8 +898,12 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
*/
dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
+
+ /* Restart iteration after dropping lock */
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&i915->gt.timelines.mutex);
+ spin_unlock(&timelines->lock);
intel_gt_sanitize(&i915->gt, false);
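For reference, a standalone sketch of the wait pattern used above (the
example_* helpers are hypothetical stand-ins for
i915_active_request_get_unlocked(), dma_fence_default_wait() and
i915_request_put(), reusing the illustrative types from the commit
message): a spinlock cannot be held across a sleeping wait, so take a
reference, drop the lock, wait, then reacquire the lock and restart the
walk from the list head.

static void example_wait_all(struct example_timelines *tls)
{
	struct example_timeline *tl;

	spin_lock(&tls->lock);
	list_for_each_entry(tl, &tls->active_list, link) {
		struct example_request *rq;

		rq = example_get_last_request(tl); /* takes a reference */
		if (!rq)
			continue;

		/* Cannot sleep under a spinlock, so drop it for the wait. */
		spin_unlock(&tls->lock);

		example_wait_on_request(rq); /* may sleep */
		example_put_request(rq);

		/*
		 * Entries may have been added or removed while unlocked,
		 * so point tl back at the list head; the next step of
		 * list_for_each_entry() then resumes from the first entry.
		 */
		spin_lock(&tls->lock);
		tl = list_entry(&tls->active_list, typeof(*tl), link);
	}
	spin_unlock(&tls->lock);
}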
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 4af0b9801d91..355dfc52c804 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -266,7 +266,7 @@ static void timelines_init(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
- mutex_init(&timelines->mutex);
+ spin_lock_init(&timelines->lock);
INIT_LIST_HEAD(&timelines->active_list);
spin_lock_init(&timelines->hwsp_lock);
@@ -345,9 +345,9 @@ void intel_timeline_enter(struct intel_timeline *tl)
return;
GEM_BUG_ON(!tl->active_count); /* overflow? */
- mutex_lock(&timelines->mutex);
+ spin_lock(&timelines->lock);
list_add(&tl->link, &timelines->active_list);
- mutex_unlock(&timelines->mutex);
+ spin_unlock(&timelines->lock);
}
void intel_timeline_exit(struct intel_timeline *tl)
@@ -358,9 +358,9 @@ void intel_timeline_exit(struct intel_timeline *tl)
if (--tl->active_count)
return;
- mutex_lock(&timelines->mutex);
+ spin_lock(&timelines->lock);
list_del(&tl->link);
- mutex_unlock(&timelines->mutex);
+ spin_unlock(&timelines->lock);
/*
* Since this timeline is idle, all barriers upon which we were waiting
@@ -548,8 +548,6 @@ static void timelines_fini(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->active_list));
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
-
- mutex_destroy(&timelines->mutex);
}
void intel_timelines_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7ade42b8ec99..b6f3baa74da4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -909,20 +909,20 @@ static int wait_for_engines(struct drm_i915_private *i915)
static long
wait_for_timelines(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
+ unsigned int wait, long timeout)
{
- struct intel_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl;
- mutex_lock(&gt->mutex);
- list_for_each_entry(tl, &gt->active_list, link) {
+ spin_lock(&timelines->lock);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- mutex_unlock(&gt->mutex);
+ spin_unlock(&timelines->lock);
/*
* "Race-to-idle".
@@ -933,19 +933,19 @@ wait_for_timelines(struct drm_i915_private *i915,
* want to complete as quickly as possible to avoid prolonged
* stalls, so allow the gpu to boost to maximum clocks.
*/
- if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ if (wait & I915_WAIT_FOR_IDLE_BOOST)
gen6_rps_boost(rq);
- timeout = i915_request_wait(rq, flags, timeout);
+ timeout = i915_request_wait(rq, wait, timeout);
i915_request_put(rq);
if (timeout < 0)
return timeout;
/* restart after reacquiring the lock */
- mutex_lock(&gt->mutex);
- tl = list_entry(&gt->active_list, typeof(*tl), link);
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&gt->mutex);
+ spin_unlock(&timelines->lock);
return timeout;
}
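A hypothetical caller sketch (not part of this patch) showing how the
reworked helper above could be used from elsewhere in i915_gem.c: wait
for every active timeline with no time limit, boosting GPU clocks for
the final requests.

static int example_wait_for_idle(struct drm_i915_private *i915)
{
	long timeout;

	timeout = wait_for_timelines(i915, I915_WAIT_FOR_IDLE_BOOST,
				     MAX_SCHEDULE_TIMEOUT);
	return timeout < 0 ? timeout : 0;
}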
--
2.20.1