[Intel-gfx] [PATCH] drm/i915: Convert trace-irq to the breadcrumb waiter

Chris Wilson chris at chris-wilson.co.uk
Mon Nov 30 07:45:18 PST 2015


If we convert the tracing over from direct use of ring->irq_get() to the
breadcrumb infrastructure, we are left with a single user of
ring->irq_get() and so can simplify the driver routines (eliminating the
redundant validation and irq refcounting).
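
For clarity, the net effect on the trace-irq path is sketched below
(this just restates the end state of the i915_drv.h and i915_gem.c
hunks; intel_breadcrumbs.c becomes the sole caller of the engine's
irq_get/irq_put vfuncs):

	/* Arming the tracepoint now just registers a breadcrumb waiter... */
	static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
					      struct drm_i915_gem_request *req)
	{
		if (ring->trace_irq_req == NULL) {
			i915_gem_request_assign(&ring->trace_irq_req, req);
			intel_breadcrumbs_add_waiter(req);
		}
	}

	/* ...and retiring the completed request drops the waiter again
	 * (from i915_gem_retire_requests_ring):
	 */
	if (unlikely(ring->trace_irq_req &&
		     i915_gem_request_completed(ring->trace_irq_req))) {
		intel_breadcrumbs_remove_waiter(ring->trace_irq_req);
		i915_gem_request_assign(&ring->trace_irq_req, NULL);
	}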

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h          |   4 +-
 drivers/gpu/drm/i915/i915_gem.c          |   2 +-
 drivers/gpu/drm/i915/intel_breadcrumbs.c |   3 +-
 drivers/gpu/drm/i915/intel_lrc.c         |  24 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c  | 210 +++++++++++--------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h  |   3 +-
 6 files changed, 87 insertions(+), 159 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f50aa580e0b5..e4d3abe24950 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3639,8 +3639,10 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
 				      struct drm_i915_gem_request *req)
 {
-	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
+	if (ring->trace_irq_req == NULL) {
 		i915_gem_request_assign(&ring->trace_irq_req, req);
+		intel_breadcrumbs_add_waiter(req);
+	}
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0afcbef7ff5f..5902bcd87178 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2876,7 +2876,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	if (unlikely(ring->trace_irq_req &&
 		     i915_gem_request_completed(ring->trace_irq_req))) {
-		ring->irq_put(ring);
+		intel_breadcrumbs_remove_waiter(ring->trace_irq_req);
 		i915_gem_request_assign(&ring->trace_irq_req, NULL);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 69091f5ae0f5..5f659079ac1c 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -34,7 +34,8 @@ static bool __irq_enable(struct intel_engine_cs *engine)
 	if (!intel_irqs_enabled(engine->i915))
 		return false;
 
-	return engine->irq_get(engine);
+	engine->irq_get(engine);
+	return true;
 }
 
 static struct drm_i915_gem_request *to_request(struct rb_node *rb)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c457dd035900..8842ea0b53fe 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1620,23 +1620,15 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-		POSTING_READ(RING_IMR(ring->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-	return true;
+	spin_lock_irq(&dev_priv->irq_lock);
+	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
+	POSTING_READ(RING_IMR(ring->mmio_base));
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
@@ -1646,10 +1638,8 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
-		POSTING_READ(RING_IMR(ring->mmio_base));
-	}
+	I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
+	POSTING_READ(RING_IMR(ring->mmio_base));
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ccceb43f14ac..2c6f74c004cb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1504,22 +1504,15 @@ gen6_seqno_barrier(struct intel_engine_cs *ring)
 	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static bool
+static void
 gen5_ring_get_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0)
-		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-	return true;
+	spin_lock_irq(&dev_priv->irq_lock);
+	gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void
@@ -1527,33 +1520,23 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0)
-		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_lock_irq(&dev_priv->irq_lock);
+	gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static bool
+static void
 i9xx_ring_get_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->irq_mask &= ~ring->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void
@@ -1561,36 +1544,25 @@ i9xx_ring_put_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->irq_mask |= ring->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static bool
+static void
 i8xx_ring_get_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->irq_mask &= ~ring->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+	POSTING_READ16(IMR);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void
@@ -1598,15 +1570,12 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->irq_mask |= ring->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->irq_mask |= ring->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+	POSTING_READ16(IMR);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static int
@@ -1646,29 +1615,21 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static bool
+static void
 gen6_ring_get_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~(ring->irq_enable_mask |
-					 GT_PARITY_ERROR(dev)));
-		else
-			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (HAS_L3_DPF(dev) && ring->id == RCS)
+		I915_WRITE_IMR(ring,
+			       ~(ring->irq_enable_mask |
+				 GT_PARITY_ERROR(dev)));
+	else
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+	gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void
@@ -1676,37 +1637,26 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
-		else
-			I915_WRITE_IMR(ring, ~0);
-		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (HAS_L3_DPF(dev) && ring->id == RCS)
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+	else
+		I915_WRITE_IMR(ring, ~0);
+	gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static bool
+static void
 hsw_vebox_get_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-	return true;
+	spin_lock_irq(&dev_priv->irq_lock);
+	I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+	gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void
@@ -1714,40 +1664,29 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		I915_WRITE_IMR(ring, ~0);
-		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_lock_irq(&dev_priv->irq_lock);
+	I915_WRITE_IMR(ring, ~0);
+	gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static bool
+static void
 gen8_ring_get_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS) {
-			I915_WRITE_IMR(ring,
-				       ~(ring->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-		} else {
-			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		}
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (HAS_L3_DPF(dev) && ring->id == RCS) {
+		I915_WRITE_IMR(ring,
+			       ~(ring->irq_enable_mask |
+				 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+	} else {
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	POSTING_READ(RING_IMR(ring->mmio_base));
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void
@@ -1755,19 +1694,16 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS) {
-			I915_WRITE_IMR(ring,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-		} else {
-			I915_WRITE_IMR(ring, ~0);
-		}
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (HAS_L3_DPF(dev) && ring->id == RCS) {
+		I915_WRITE_IMR(ring,
+			       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	} else {
+		I915_WRITE_IMR(ring, ~0);
 	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	POSTING_READ(RING_IMR(ring->mmio_base));
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 74b9df21f062..9f4a945e312c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -171,10 +171,9 @@ struct  intel_engine_cs {
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
 
-	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	struct drm_i915_gem_request *trace_irq_req;
-	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
+	void		(*irq_get)(struct intel_engine_cs *ring);
 	void		(*irq_put)(struct intel_engine_cs *ring);
 
 	int		(*init_hw)(struct intel_engine_cs *ring);
-- 
2.6.2
