[Intel-gfx] [PATCH 13/19] drm/i915: Reduce spinlock hold time during notify_ring() interrupt
Chris Wilson
chris at chris-wilson.co.uk
Tue Jan 2 15:12:29 UTC 2018
By taking advantage of the RCU protection of the request and task
structs, we can find the appropriate signaler under the spinlock and
then release the spinlock before waking the task and signaling the
fence.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_irq.c | 40 ++++++++++++++++++++++------------------
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3517c6548e2c..843a1560d875 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1065,7 +1065,9 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
+ const u32 seqno = intel_engine_get_seqno(engine);
struct drm_i915_gem_request *rq = NULL;
+ struct task_struct *tsk = NULL;
struct intel_wait *wait;
if (!engine->breadcrumbs.irq_armed)
@@ -1074,12 +1076,13 @@ static void notify_ring(struct intel_engine_cs *engine)
atomic_inc(&engine->irq_count);
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ rcu_read_lock();
+
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
- bool wakeup = engine->irq_seqno_barrier;
-
- /* We use a callback from the dma-fence to submit
+ /*
+ * We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
* hardware, signal the fence now rather than wait for
@@ -1090,29 +1093,30 @@ static void notify_ring(struct intel_engine_cs *engine)
* and to handle coalescing of multiple seqno updates
* and many waiters.
*/
- if (i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno)) {
- struct drm_i915_gem_request *waiter = wait->request;
-
- wakeup = true;
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &waiter->fence.flags) &&
- intel_wait_check_request(wait, waiter))
- rq = i915_gem_request_get(waiter);
+		if (i915_seqno_passed(seqno, wait->seqno)) {
+			tsk = wait->tsk;
+			rq = wait->request;
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &rq->fence.flags) ||
+			    !intel_wait_check_request(wait, rq))
+				rq = NULL;
+ } else {
+ if (engine->irq_seqno_barrier &&
+ i915_seqno_passed(seqno, wait->seqno - 1))
+ tsk = wait->tsk;
}
-
- if (wakeup)
- wake_up_process(wait->tsk);
} else {
if (engine->breadcrumbs.irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
}
spin_unlock(&engine->breadcrumbs.irq_lock);
- if (rq) {
+ if (rq)
dma_fence_signal(&rq->fence);
- i915_gem_request_put(rq);
- }
+ if (tsk)
+ wake_up_process(tsk);
+
+ rcu_read_unlock();
trace_intel_engine_notify(engine, wait);
}
--
2.15.1
More information about the Intel-gfx
mailing list