[Intel-gfx] [PATCH 01/50] drm/i915: s/for_each_ring/for_each_active_ring

oscar.mateo at intel.com
Fri May 9 14:08:31 CEST 2014


From: Ben Widawsky <benjamin.widawsky at intel.com>

The name "active" was recommended by Chris.

With the ordering change in how things are initialized, it is desirable to be
able to address each ring, whether it has been initialized or not. Renaming
the existing iterator to for_each_active_ring makes it explicit that it only
walks rings that have already been initialized.
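
A minimal usage sketch (hypothetical caller, not part of this patch; it
assumes a dev_priv already in scope) showing that the renamed iterator still
only visits rings for which intel_ring_initialized() is true:

	struct intel_ring_buffer *ring;
	int i;

	/* Uninitialized ring slots are simply skipped by the iterator. */
	for_each_active_ring(ring, dev_priv, i)
		DRM_DEBUG_DRIVER("%s: %s requests pending\n", ring->name,
				 list_empty(&ring->request_list) ? "no" : "some");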

Cc: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>

v2: Several rebases.

Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     | 12 ++++++------
 drivers/gpu/drm/i915/i915_drv.h         |  2 +-
 drivers/gpu/drm/i915/i915_gem.c         | 16 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c     | 10 +++++-----
 drivers/gpu/drm/i915/i915_irq.c         | 10 +++++-----
 drivers/gpu/drm/i915/intel_pm.c         |  8 ++++----
 drivers/gpu/drm/i915/intel_ringbuffer.c |  2 +-
 8 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18b3565..103e62c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -571,7 +571,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		return ret;
 
 	count = 0;
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		if (list_empty(&ring->request_list))
 			continue;
 
@@ -615,7 +615,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		i915_ring_seqno_info(m, ring);
 
 	intel_runtime_pm_put(dev_priv);
@@ -752,7 +752,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
@@ -1703,7 +1703,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
-		for_each_ring(ring, dev_priv, i)
+		for_each_active_ring(ring, dev_priv, i)
 			if (ring->default_context == ctx)
 				seq_printf(m, "(default context %s) ", ring->name);
 
@@ -1835,7 +1835,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
 	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
-	for_each_ring(ring, dev_priv, unused) {
+	for_each_active_ring(ring, dev_priv, unused) {
 		seq_printf(m, "%s\n", ring->name);
 		for (i = 0; i < 4; i++) {
 			u32 offset = 0x270 + i * 8;
@@ -1857,7 +1857,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		seq_printf(m, "%s\n", ring->name);
 		if (INTEL_INFO(dev)->gen == 7)
 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 26253c0..a53a028 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1545,7 +1545,7 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 }
 
 /* Iterate over initialised rings */
-#define for_each_ring(ring__, dev_priv__, i__) \
+#define for_each_active_ring(ring__, dev_priv__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
 		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8fd1824..ce941cf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2108,7 +2108,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 	int ret, i, j;
 
 	/* Carefully retire all requests without writing to the rings */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
@@ -2116,7 +2116,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 	i915_gem_retire_requests(dev);
 
 	/* Finally reset hw state */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		intel_ring_init_seqno(ring, seqno);
 
 		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
@@ -2434,10 +2434,10 @@ void i915_gem_reset(struct drm_device *dev)
 	 * them for finding the guilty party. As the requests only borrow
 	 * their reference to the objects, the inspection must be done first.
 	 */
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_status(dev_priv, ring);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_context_reset(dev);
@@ -2516,7 +2516,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 	bool idle = true;
 	int i;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		i915_gem_retire_requests_ring(ring);
 		idle &= list_empty(&ring->request_list);
 	}
@@ -2804,7 +2804,7 @@ int i915_gpu_idle(struct drm_device *dev)
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;
@@ -4261,7 +4261,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		intel_stop_ring_buffer(ring);
 }
 
@@ -4533,7 +4533,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		intel_cleanup_ring_buffer(ring);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f77b4c1..948df20 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -483,7 +483,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 
 	BUG_ON(!dev_priv->ring[RCS].default_context);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 72b1bf8..1dff805 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -835,7 +835,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 	struct intel_ring_buffer *ring;
 	int j, ret;
 
-	for_each_ring(ring, dev_priv, j) {
+	for_each_active_ring(ring, dev_priv, j) {
 		I915_WRITE(RING_MODE_GEN7(ring),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
@@ -852,7 +852,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 	return 0;
 
 err_out:
-	for_each_ring(ring, dev_priv, j)
+	for_each_active_ring(ring, dev_priv, j)
 		I915_WRITE(RING_MODE_GEN7(ring),
 			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
 	return ret;
@@ -878,7 +878,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 	}
 	I915_WRITE(GAM_ECOCHK, ecochk);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		int ret;
 		/* GFX_MODE is per-ring on gen7+ */
 		I915_WRITE(RING_MODE_GEN7(ring),
@@ -917,7 +917,7 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 
 	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		int ret = ppgtt->switch_mm(ppgtt, ring, true);
 		if (ret)
 			return ret;
@@ -1275,7 +1275,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen < 6)
 		return;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		u32 fault_reg;
 		fault_reg = I915_READ(RING_FAULT_REG(ring));
 		if (fault_reg & RING_FAULT_VALID) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2d76183..4a8e8cb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2122,7 +2122,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 	 */
 
 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		wake_up_all(&ring->irq_queue);
 
 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
@@ -2591,7 +2591,7 @@ semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
-		for_each_ring(signaller, dev_priv, i) {
+		for_each_active_ring(signaller, dev_priv, i) {
 			if(ring == signaller)
 				continue;
 
@@ -2674,7 +2674,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 	struct intel_ring_buffer *ring;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		ring->hangcheck.deadlock = false;
 }
 
@@ -2746,7 +2746,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 	if (!i915.enable_hangcheck)
 		return;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		u64 acthd;
 		u32 seqno;
 		bool busy = true;
@@ -2825,7 +2825,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 		busy_count += busy;
 	}
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_active_ring(ring, dev_priv, i) {
 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
 			DRM_INFO("%s on %s\n",
 				 stuck[i] ? "stuck" : "no progress",
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 834c49c..acfded3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3400,7 +3400,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
+	for_each_active_ring(ring, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
@@ -3494,7 +3494,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -3819,7 +3819,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -4435,7 +4435,7 @@ bool i915_gpu_busy(void)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_active_ring(ring, dev_priv, i)
 		ret |= !list_empty(&ring->request_list);
 
 out_unlock:
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 40a7aa4..a112971 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -685,7 +685,7 @@ static int gen6_signal(struct intel_ring_buffer *signaller,
 		return ret;
 #undef MBOX_UPDATE_DWORDS
 
-	for_each_ring(useless, dev_priv, i) {
+	for_each_active_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-- 
1.9.0