[Intel-gfx] [PATCH 2/3] drm/i915: Rename variables and functions that act upon intel_engine_cs

Chris Wilson chris at chris-wilson.co.uk
Sun Aug 24 17:54:21 CEST 2014


We have engines and rings now, both of which we refer to as rings. This
is extremely confusing, and it will only get worse as the distinction
between the two grows.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c     | 150 +++----
 drivers/gpu/drm/i915/i915_debugfs.c        | 132 +++----
 drivers/gpu/drm/i915/i915_dma.c            |   8 +-
 drivers/gpu/drm/i915/i915_drv.h            |  65 ++-
 drivers/gpu/drm/i915/i915_gem.c            | 332 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c    | 124 +++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 164 ++++----
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 120 +++---
 drivers/gpu/drm/i915/i915_gpu_error.c      | 138 +++----
 drivers/gpu/drm/i915/i915_irq.c            | 222 +++++------
 drivers/gpu/drm/i915/intel_display.c       | 176 ++++-----
 drivers/gpu/drm/i915/intel_drv.h           |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 608 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_lrc.h           |   2 +-
 drivers/gpu/drm/i915/intel_overlay.c       |  70 ++--
 drivers/gpu/drm/i915/intel_pm.c            |  50 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  88 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 157 ++++----
 18 files changed, 1302 insertions(+), 1306 deletions(-)
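
The rename is mechanical and takes the same shape in every file listed
above: an intel_engine_cs pointer previously called "ring" becomes
"engine", and the iterator macro follows suit. A minimal illustrative
sketch (the for_each_ring/for_each_engine macros, the name member and
the seq_printf usage are taken from this patch; the surrounding debugfs
body is invented for the example):

	/* Before: an intel_engine_cs is confusingly called a "ring". */
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		seq_printf(m, "%s\n", ring->name);

	/* After: "engine" names the command streamer; "ring"/"ringbuf"
	 * is reserved for the struct intel_ringbuffer it executes from.
	 */
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv, i)
		seq_printf(m, "%s\n", engine->name);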

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index c45856b..a15fbb7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -501,7 +501,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
 {
@@ -523,7 +523,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
 
 			if (curr < previous) {
 				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-					  ring->id, i, j, curr, previous);
+					  engine->id, i, j, curr, previous);
 				ret = false;
 			}
 
@@ -555,11 +555,11 @@ static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
 	return ret;
 }
 
-static bool validate_regs_sorted(struct intel_engine_cs *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *engine)
 {
-	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
-		check_sorted(ring->id, ring->master_reg_table,
-			     ring->master_reg_count);
+	return check_sorted(engine->id, engine->reg_table, engine->reg_count) &&
+		check_sorted(engine->id, engine->master_reg_table,
+			     engine->master_reg_count);
 }
 
 struct cmd_node {
@@ -583,13 +583,13 @@ struct cmd_node {
  */
 #define CMD_HASH_MASK STD_MI_OPCODE_MASK
 
-static int init_hash_table(struct intel_engine_cs *ring,
+static int init_hash_table(struct intel_engine_cs *engine,
 			   const struct drm_i915_cmd_table *cmd_tables,
 			   int cmd_table_count)
 {
 	int i, j;
 
-	hash_init(ring->cmd_hash);
+	hash_init(engine->cmd_hash);
 
 	for (i = 0; i < cmd_table_count; i++) {
 		const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -604,7 +604,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
 				return -ENOMEM;
 
 			desc_node->desc = desc;
-			hash_add(ring->cmd_hash, &desc_node->node,
+			hash_add(engine->cmd_hash, &desc_node->node,
 				 desc->cmd.value & CMD_HASH_MASK);
 		}
 	}
@@ -612,21 +612,21 @@ static int init_hash_table(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static void fini_hash_table(struct intel_engine_cs *ring)
+static void fini_hash_table(struct intel_engine_cs *engine)
 {
 	struct hlist_node *tmp;
 	struct cmd_node *desc_node;
 	int i;
 
-	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+	hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
 		hash_del(&desc_node->node);
 		kfree(desc_node);
 	}
 }
 
 /**
- * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
- * @ring: the ringbuffer to initialize
+ * i915_cmd_parser_init_engine() - set cmd parser related fields for an engine
+ * @engine: the engine to initialize
  *
  * Optionally initializes fields related to batch buffer command parsing in the
  * struct intel_engine_cs based on whether the platform requires software
@@ -634,18 +634,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
  *
  * Return: non-zero if initialization fails
  */
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+int i915_cmd_parser_init_engine(struct intel_engine_cs *engine)
 {
 	const struct drm_i915_cmd_table *cmd_tables;
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(ring->dev))
+	if (!IS_GEN7(engine->dev))
 		return 0;
 
-	switch (ring->id) {
+	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -654,26 +654,26 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
 		}
 
-		ring->reg_table = gen7_render_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+		engine->reg_table = gen7_render_regs;
+		engine->reg_count = ARRAY_SIZE(gen7_render_regs);
 
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->master_reg_table = hsw_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->master_reg_table = ivb_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
 		}
 
-		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VCS:
 		cmd_tables = gen7_video_cmds;
 		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -681,68 +681,68 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
 		}
 
-		ring->reg_table = gen7_blt_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+		engine->reg_table = gen7_blt_regs;
+		engine->reg_count = ARRAY_SIZE(gen7_blt_regs);
 
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->master_reg_table = hsw_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->master_reg_table = ivb_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
 		}
 
-		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VECS:
 		cmd_tables = hsw_vebox_cmds;
 		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
 		/* VECS can use the same length_mask function as VCS */
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	default:
-		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
-			  ring->id);
+		DRM_ERROR("CMD: cmd_parser_init with unknown engine: %d\n",
+			  engine->id);
 		BUG();
 	}
 
-	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
-	BUG_ON(!validate_regs_sorted(ring));
+	BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
+	BUG_ON(!validate_regs_sorted(engine));
 
-	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
 	if (ret) {
 		DRM_ERROR("CMD: cmd_parser_init failed!\n");
-		fini_hash_table(ring);
+		fini_hash_table(engine);
 		return ret;
 	}
 
-	ring->needs_cmd_parser = true;
+	engine->needs_cmd_parser = true;
 
 	return 0;
 }
 
 /**
- * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
- * @ring: the ringbuffer to clean up
+ * i915_cmd_parser_fini_engine() - clean up cmd parser related fields
+ * @engine: the engine to clean up
  *
  * Releases any resources related to command parsing that may have been
- * initialized for the specified ring.
+ * initialized for the specified engine.
  */
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+void i915_cmd_parser_fini_engine(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return;
 
-	fini_hash_table(ring);
+	fini_hash_table(engine);
 }
 
 static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_engine_cs *ring,
+find_cmd_in_table(struct intel_engine_cs *engine,
 		  u32 cmd_header)
 {
 	struct cmd_node *desc_node;
 
-	hash_for_each_possible(ring->cmd_hash, desc_node, node,
+	hash_for_each_possible(engine->cmd_hash, desc_node, node,
 			       cmd_header & CMD_HASH_MASK) {
 		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
 		u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -759,23 +759,23 @@ find_cmd_in_table(struct intel_engine_cs *ring,
  * Returns a pointer to a descriptor for the command specified by cmd_header.
  *
  * The caller must supply space for a default descriptor via the default_desc
- * parameter. If no descriptor for the specified command exists in the ring's
+ * parameter. If no descriptor for the specified command exists in the engine's
  * command parser tables, this function fills in default_desc based on the
- * ring's default length encoding and returns default_desc.
+ * engine's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_engine_cs *ring,
+find_cmd(struct intel_engine_cs *engine,
 	 u32 cmd_header,
 	 struct drm_i915_cmd_descriptor *default_desc)
 {
 	const struct drm_i915_cmd_descriptor *desc;
 	u32 mask;
 
-	desc = find_cmd_in_table(ring, cmd_header);
+	desc = find_cmd_in_table(engine, cmd_header);
 	if (desc)
 		return desc;
 
-	mask = ring->get_cmd_length_mask(cmd_header);
+	mask = engine->get_cmd_length_mask(cmd_header);
 	if (!mask)
 		return NULL;
 
@@ -832,17 +832,17 @@ finish:
 }
 
 /**
- * i915_needs_cmd_parser() - should a given ring use software command parsing?
- * @ring: the ring in question
+ * i915_needs_cmd_parser() - should a given engine use software command parsing?
+ * @engine: the engine in question
  *
  * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
  *
- * Return: true if the ring requires software command parsing
+ * Return: true if the engine requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return false;
 
 	/*
@@ -850,13 +850,13 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
 	 * disabled. That will cause all of the parser's PPGTT checks to
 	 * fail. For now, disable parsing when PPGTT is off.
 	 */
-	if (USES_PPGTT(ring->dev))
+	if (USES_PPGTT(engine->dev))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
 }
 
-static bool check_cmd(const struct intel_engine_cs *ring,
+static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
 		      const u32 *cmd,
 		      const bool is_master,
@@ -893,16 +893,16 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 				*oacontrol_set = (cmd[2] != 0);
 		}
 
-		if (!valid_reg(ring->reg_table,
-			       ring->reg_count, reg_addr)) {
+		if (!valid_reg(engine->reg_table,
+			       engine->reg_count, reg_addr)) {
 			if (!is_master ||
-			    !valid_reg(ring->master_reg_table,
-				       ring->master_reg_count,
+			    !valid_reg(engine->master_reg_table,
+				       engine->master_reg_count,
 				       reg_addr)) {
-				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (engine=%d)\n",
 						 reg_addr,
 						 *cmd,
-						 ring->id);
+						 engine->id);
 				return false;
 			}
 		}
@@ -931,11 +931,11 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 				desc->bits[i].mask;
 
 			if (dword != desc->bits[i].expected) {
-				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (engine=%d)\n",
 						 *cmd,
 						 desc->bits[i].mask,
 						 desc->bits[i].expected,
-						 dword, ring->id);
+						 dword, engine->id);
 				return false;
 			}
 		}
@@ -948,7 +948,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 
 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ring: the ring on which the batch is to execute
+ * @engine: the engine on which the batch is to execute
  * @batch_obj: the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
  * @is_master: is the submitting process the drm master?
@@ -958,7 +958,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  *
  * Return: non-zero if the parser finds violations or otherwise fails
  */
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    u32 batch_start_offset,
 		    bool is_master)
@@ -995,7 +995,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 		if (*cmd == MI_BATCH_BUFFER_END)
 			break;
 
-		desc = find_cmd(ring, *cmd, &default_desc);
+		desc = find_cmd(engine, *cmd, &default_desc);
 		if (!desc) {
 			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 					 *cmd);
@@ -1017,7 +1017,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 			break;
 		}
 
-		if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
+		if (!check_cmd(engine, desc, cmd, is_master, &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
 		}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6c82bda..a7b9f37 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -573,7 +573,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *gem_request;
 	int ret, count, i;
 
@@ -582,13 +582,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		return ret;
 
 	count = 0;
-	for_each_ring(ring, dev_priv, i) {
-		if (list_empty(&ring->request_list))
+	for_each_engine(engine, dev_priv, i) {
+		if (list_empty(&engine->request_list))
 			continue;
 
-		seq_printf(m, "%s requests:\n", ring->name);
+		seq_printf(m, "%s requests:\n", engine->name);
 		list_for_each_entry(gem_request,
-				    &ring->request_list,
+				    &engine->request_list,
 				    list) {
 			seq_printf(m, "    %d @ %d\n",
 				   gem_request->seqno,
@@ -605,11 +605,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-				 struct intel_engine_cs *ring)
+				 struct intel_engine_cs *engine)
 {
-	if (ring->get_seqno) {
+	if (engine->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %u\n",
-			   ring->name, ring->get_seqno(ring, false));
+			   engine->name, engine->get_seqno(engine, false));
 	}
 }
 
@@ -618,7 +618,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -626,8 +626,8 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_ring_seqno_info(m, ring);
+	for_each_engine(engine, dev_priv, i)
+		i915_ring_seqno_info(m, engine);
 
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -641,7 +641,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -809,13 +809,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
-				   ring->name, I915_READ_IMR(ring));
+				   engine->name, I915_READ_IMR(engine));
 		}
-		i915_ring_seqno_info(m, ring);
+		i915_ring_seqno_info(m, engine);
 	}
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -857,12 +857,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
 
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = ring->status_page.page_addr;
+	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+	hws = engine->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -1690,7 +1690,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	int ret, i;
 
@@ -1717,21 +1717,21 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context == ctx)
+		for_each_engine(engine, dev_priv, i) {
+			if (engine->default_context == ctx)
 				seq_printf(m, "(default context %s) ",
-					   ring->name);
+					   engine->name);
 		}
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
-			for_each_ring(ring, dev_priv, i) {
+			for_each_engine(engine, dev_priv, i) {
 				struct drm_i915_gem_object *ctx_obj =
-					ctx->engine[i].state;
+					ctx->ring[i].state;
 				struct intel_ringbuffer *ringbuf =
-					ctx->engine[i].ringbuf;
+					ctx->ring[i].ringbuf;
 
-				seq_printf(m, "%s: ", ring->name);
+				seq_printf(m, "%s: ", engine->name);
 				if (ctx_obj)
 					describe_obj(m, ctx_obj);
 				if (ringbuf)
@@ -1755,7 +1755,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	int ret, i;
 
@@ -1769,10 +1769,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		for_each_ring(ring, dev_priv, i) {
-			struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+		for_each_engine(engine, dev_priv, i) {
+			struct drm_i915_gem_object *ctx_obj = ctx->ring[i].state;
 
-			if (ring->default_context == ctx)
+			if (engine->default_context == ctx)
 				continue;
 
 			if (ctx_obj) {
@@ -1780,7 +1780,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 				uint32_t *reg_state = kmap_atomic(page);
 				int j;
 
-				seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+				seq_printf(m, "CONTEXT: %s %u\n", engine->name,
 						intel_execlists_ctx_id(ctx_obj));
 
 				for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
@@ -1806,7 +1806,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
@@ -1825,22 +1825,22 @@ static int i915_execlists(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	for_each_ring(ring, dev_priv, ring_id) {
+	for_each_engine(engine, dev_priv, ring_id) {
 		struct intel_ctx_submit_request *head_req = NULL;
 		int count = 0;
 		unsigned long flags;
 
-		seq_printf(m, "%s\n", ring->name);
+		seq_printf(m, "%s\n", engine->name);
 
-		status = I915_READ(RING_EXECLIST_STATUS(ring));
-		ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
+		status = I915_READ(RING_EXECLIST_STATUS(engine));
+		ctx_id = I915_READ(RING_EXECLIST_STATUS(engine) + 4);
 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
 			   status, ctx_id);
 
-		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-		read_pointer = ring->next_context_status_buffer;
+		read_pointer = engine->next_context_status_buffer;
 		write_pointer = status_pointer & 0x07;
 		if (read_pointer > write_pointer)
 			write_pointer += 6;
@@ -1848,25 +1848,25 @@ static int i915_execlists(struct seq_file *m, void *data)
 			   read_pointer, write_pointer);
 
 		for (i = 0; i < 6; i++) {
-			status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
-			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
+			status = I915_READ(RING_CONTEXT_STATUS_BUF(engine) + 8*i);
+			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(engine) + 8*i + 4);
 
 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
 				   i, status, ctx_id);
 		}
 
-		spin_lock_irqsave(&ring->execlist_lock, flags);
-		list_for_each(cursor, &ring->execlist_queue)
+		spin_lock_irqsave(&engine->execlist_lock, flags);
+		list_for_each(cursor, &engine->execlist_queue)
 			count++;
-		head_req = list_first_entry_or_null(&ring->execlist_queue,
+		head_req = list_first_entry_or_null(&engine->execlist_queue,
 				struct intel_ctx_submit_request, execlist_link);
-		spin_unlock_irqrestore(&ring->execlist_lock, flags);
+		spin_unlock_irqrestore(&engine->execlist_lock, flags);
 
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
 			struct drm_i915_gem_object *ctx_obj;
 
-			ctx_obj = head_req->ctx->engine[ring_id].state;
+			ctx_obj = head_req->ctx->ring[ring_id].state;
 			seq_printf(m, "\tHead request id: %u\n",
 				   intel_execlists_ctx_id(ctx_obj));
 			seq_printf(m, "\tHead request tail: %u\n",
@@ -2001,7 +2001,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 	int unused, i;
 
@@ -2010,13 +2010,13 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
 	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
-	for_each_ring(ring, dev_priv, unused) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv, unused) {
+		seq_printf(m, "%s\n", engine->name);
 		for (i = 0; i < 4; i++) {
 			u32 offset = 0x270 + i * 8;
-			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
+			u64 pdp = I915_READ(engine->mmio_base + offset + 4);
 			pdp <<= 32;
-			pdp |= I915_READ(ring->mmio_base + offset);
+			pdp |= I915_READ(engine->mmio_base + offset);
 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
@@ -2025,20 +2025,20 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_file *file;
 	int i;
 
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv, i) {
+		seq_printf(m, "%s\n", engine->name);
 		if (INTEL_INFO(dev)->gen == 7)
-			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
-		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
-		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
-		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(engine)));
+		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(engine)));
+		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(engine)));
+		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2525,7 +2525,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
 	int i, j, ret;
 
@@ -2546,14 +2546,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_ring(ring, dev_priv, i) {
+		for_each_engine(engine, dev_priv, i) {
 			uint64_t offset;
 
-			seq_printf(m, "%s\n", ring->name);
+			seq_printf(m, "%s\n", engine->name);
 
 			seq_puts(m, "  Last signal:");
 			for (j = 0; j < num_rings; j++) {
-				offset = i * I915_NUM_RINGS + j;
+				offset = i * I915_NUM_ENGINES + j;
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -2561,7 +2561,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
 			seq_puts(m, "  Last wait:  ");
 			for (j = 0; j < num_rings; j++) {
-				offset = i + (j * I915_NUM_RINGS);
+				offset = i + (j * I915_NUM_ENGINES);
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -2571,17 +2571,17 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, "  Last signal:");
-		for_each_ring(ring, dev_priv, i)
+		for_each_engine(engine, dev_priv, i)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
-					   I915_READ(ring->semaphore.mbox.signal[j]));
+					   I915_READ(engine->semaphore.mbox.signal[j]));
 		seq_putc(m, '\n');
 	}
 
 	seq_puts(m, "\nSync seqno:\n");
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv, i) {
 		for (j = 0; j < num_rings; j++) {
-			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
+			seq_printf(m, "  0x%08x ", engine->semaphore.sync_seqno[j]);
 		}
 		seq_putc(m, '\n');
 	}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5789e7b..d681226 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -141,13 +141,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_HAS_BSD:
-		value = intel_ring_initialized(&dev_priv->ring[VCS]);
+		value = intel_engine_initialized(&dev_priv->engine[VCS]);
 		break;
 	case I915_PARAM_HAS_BLT:
-		value = intel_ring_initialized(&dev_priv->ring[BCS]);
+		value = intel_engine_initialized(&dev_priv->engine[BCS]);
 		break;
 	case I915_PARAM_HAS_VEBOX:
-		value = intel_ring_initialized(&dev_priv->ring[VECS]);
+		value = intel_engine_initialized(&dev_priv->engine[VECS]);
 		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
@@ -1107,8 +1107,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	if (file_priv && file_priv->bsd_ring)
-		file_priv->bsd_ring = NULL;
 	kfree(file_priv);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 619f21d..9c26e6e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -345,14 +345,14 @@ struct drm_i915_error_state {
 		/* Software tracked state */
 		bool waiting;
 		int hangcheck_score;
-		enum intel_ring_hangcheck_action hangcheck_action;
+		enum intel_engine_hangcheck_action hangcheck_action;
 		int num_requests;
 
 		/* our own tracking of ring head and tail */
 		u32 cpu_ring_head;
 		u32 cpu_ring_tail;
 
-		u32 semaphore_seqno[I915_NUM_RINGS - 1];
+		u32 semaphore_seqno[I915_NUM_ENGINES - 1];
 
 		/* Register state */
 		u32 tail;
@@ -371,7 +371,7 @@ struct drm_i915_error_state {
 		u32 fault_reg;
 		u64 faddr;
 		u32 rc_psmi; /* sleep state */
-		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
 
 		struct drm_i915_error_object {
 			int page_count;
@@ -395,7 +395,7 @@ struct drm_i915_error_state {
 
 		pid_t pid;
 		char comm[TASK_COMM_LEN];
-	} ring[I915_NUM_RINGS];
+	} ring[I915_NUM_ENGINES];
 
 	struct drm_i915_error_buffer {
 		u32 size;
@@ -475,7 +475,7 @@ struct drm_i915_display_funcs {
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj,
-			  struct intel_engine_cs *ring,
+			  struct intel_engine_cs *engine,
 			  uint32_t flags);
 	void (*update_primary_plane)(struct drm_crtc *crtc,
 				     struct drm_framebuffer *fb,
@@ -638,7 +638,7 @@ struct intel_context {
 	struct {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
-	} engine[I915_NUM_RINGS];
+	} ring[I915_NUM_ENGINES];
 
 	struct list_head link;
 };
@@ -1430,7 +1430,7 @@ struct drm_i915_private {
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;
-	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
 
@@ -1629,15 +1629,15 @@ struct drm_i915_private {
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
 		int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
-				  struct intel_engine_cs *ring,
+				  struct intel_engine_cs *engine,
 				  struct intel_context *ctx,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct list_head *vmas,
 				  struct drm_i915_gem_object *batch_obj,
 				  u64 exec_start, u32 flags);
 		int (*init_rings)(struct drm_device *dev);
-		void (*cleanup_ring)(struct intel_engine_cs *ring);
-		void (*stop_ring)(struct intel_engine_cs *ring);
+		void (*cleanup_ring)(struct intel_engine_cs *engine);
+		void (*stop_ring)(struct intel_engine_cs *engine);
 	} gt;
 
 	/*
@@ -1652,9 +1652,9 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 }
 
 /* Iterate over initialised rings */
-#define for_each_ring(ring__, dev_priv__, i__) \
-	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
-		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+#define for_each_engine(engine__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
+		if (((engine__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((engine__)))
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -1851,7 +1851,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
  */
 struct drm_i915_gem_request {
 	/** On Which ring this request was generated */
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
@@ -1891,7 +1891,7 @@ struct drm_i915_file_private {
 	struct idr context_idr;
 
 	atomic_t rps_wait_boost;
-	struct  intel_engine_cs *bsd_ring;
+	struct  intel_engine_cs *bsd_engine;
 };
 
 /*
@@ -2250,14 +2250,14 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct intel_engine_cs *ring);
+					struct intel_engine_cs *engine);
 void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 					 struct drm_file *file,
-					 struct intel_engine_cs *ring,
+					 struct intel_engine_cs *engine,
 					 struct drm_i915_gem_object *obj);
 int i915_gem_ringbuffer_submission(struct drm_device *dev,
 				   struct drm_file *file,
-				   struct intel_engine_cs *ring,
+				   struct intel_engine_cs *engine,
 				   struct intel_context *ctx,
 				   struct drm_i915_gem_execbuffer2 *args,
 				   struct list_head *vmas,
@@ -2351,7 +2351,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_engine_cs *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct intel_engine_cs *engine);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -2375,13 +2375,13 @@ bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
+void i915_gem_retire_requests__engine(struct intel_engine_cs *engine);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
-int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *engine, u32 seqno);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -2415,20 +2415,19 @@ void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct intel_engine_cs *engine, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
+int __i915_add_request(struct intel_engine_cs *engine,
 		       struct drm_file *file,
 		       struct drm_i915_gem_object *batch_obj,
 		       u32 *seqno);
 #define i915_add_request(ring, seqno) \
 	__i915_add_request(ring, NULL, NULL, seqno)
-int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
+int __must_check i915_wait_seqno(struct intel_engine_cs *engine,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
@@ -2545,7 +2544,7 @@ void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
+int i915_switch_context(struct intel_engine_cs *engine,
 			struct intel_context *to);
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
@@ -2573,7 +2572,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file);
 
 /* i915_gem_render_state.c */
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
+int i915_gem_render_state_init(struct intel_engine_cs *engine);
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  struct i915_address_space *vm,
@@ -2659,10 +2658,10 @@ const char *i915_cache_level_str(int type);
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_init_engine(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_engine(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    u32 batch_start_offset,
 		    bool is_master);
@@ -2764,7 +2763,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file);
 
-void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+void intel_notify_mmio_flip(struct intel_engine_cs *engine);
 
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 05d91a9..50e20da 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1098,15 +1098,15 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
  * equal.
  */
 int
-i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+i915_gem_check_olr(struct intel_engine_cs *engine, u32 seqno)
 {
 	int ret;
 
-	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&engine->dev->struct_mutex));
 
 	ret = 0;
-	if (seqno == ring->outstanding_lazy_seqno)
-		ret = i915_add_request(ring, NULL);
+	if (seqno == engine->outstanding_lazy_seqno)
+		ret = i915_add_request(engine, NULL);
 
 	return ret;
 }
@@ -1117,9 +1117,9 @@ static void fake_irq(unsigned long data)
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-		       struct intel_engine_cs *ring)
+		       struct intel_engine_cs *engine)
 {
-	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
@@ -1148,16 +1148,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in the timeout argument.
  */
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+static int __wait_seqno(struct intel_engine_cs *engine, u32 seqno,
 			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
 			struct drm_i915_file_private *file_priv)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
-		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
 	s64 before, now;
@@ -1165,12 +1165,12 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+	if (i915_seqno_passed(engine->get_seqno(engine, true), seqno))
 		return 0;
 
 	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 
-	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
+	if (INTEL_INFO(dev)->gen >= 6 && engine->id == RCS && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
 		if (file_priv)
 			mod_delayed_work(dev_priv->wq,
@@ -1178,16 +1178,16 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 					 msecs_to_jiffies(100));
 	}
 
-	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
+	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine)))
 		return -ENODEV;
 
 	/* Record current time in case interrupted by signal, or wedged */
-	trace_i915_gem_request_wait_begin(ring, seqno);
+	trace_i915_gem_request_wait_begin(engine, seqno);
 	before = ktime_get_raw_ns();
 	for (;;) {
 		struct timer_list timer;
 
-		prepare_to_wait(&ring->irq_queue, &wait,
+		prepare_to_wait(&engine->irq_queue, &wait,
 				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 
 		/* We need to check whether any gpu reset happened in between
@@ -1201,7 +1201,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			break;
 		}
 
-		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+		if (i915_seqno_passed(engine->get_seqno(engine, false), seqno)) {
 			ret = 0;
 			break;
 		}
@@ -1217,11 +1217,11 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 		}
 
 		timer.function = NULL;
-		if (timeout || missed_irq(dev_priv, ring)) {
+		if (timeout || missed_irq(dev_priv, engine)) {
 			unsigned long expire;
 
 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
 			mod_timer(&timer, expire);
 		}
 
@@ -1233,12 +1233,12 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 		}
 	}
 	now = ktime_get_raw_ns();
-	trace_i915_gem_request_wait_end(ring, seqno);
+	trace_i915_gem_request_wait_end(engine, seqno);
 
 	if (!irq_test_in_progress)
-		ring->irq_put(ring);
+		engine->irq_put(engine);
 
-	finish_wait(&ring->irq_queue, &wait);
+	finish_wait(&engine->irq_queue, &wait);
 
 	if (timeout) {
 		s64 tres = *timeout - (now - before);
@@ -1254,9 +1254,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
  * request and object lists appropriately for that event.
  */
 int
-i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_engine_cs *engine, uint32_t seqno)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool interruptible = dev_priv->mm.interruptible;
 	int ret;
@@ -1268,18 +1268,18 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_olr(ring, seqno);
+	ret = i915_gem_check_olr(engine, seqno);
 	if (ret)
 		return ret;
 
-	return __wait_seqno(ring, seqno,
+	return __wait_seqno(engine, seqno,
 			    atomic_read(&dev_priv->gpu_error.reset_counter),
 			    interruptible, NULL, NULL);
 }
 
 static int
 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-				     struct intel_engine_cs *ring)
+				     struct intel_engine_cs *engine)
 {
 	if (!obj->active)
 		return 0;
@@ -1304,7 +1304,7 @@ static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly)
 {
-	struct intel_engine_cs *ring = obj->ring;
+	struct intel_engine_cs *engine = obj->ring;
 	u32 seqno;
 	int ret;
 
@@ -1312,11 +1312,11 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	if (seqno == 0)
 		return 0;
 
-	ret = i915_wait_seqno(ring, seqno);
+	ret = i915_wait_seqno(engine, seqno);
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	return i915_gem_object_wait_rendering__tail(obj, engine);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1329,7 +1329,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = obj->ring;
+	struct intel_engine_cs *engine = obj->ring;
 	unsigned reset_counter;
 	u32 seqno;
 	int ret;
@@ -1345,18 +1345,18 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_olr(ring, seqno);
+	ret = i915_gem_check_olr(engine, seqno);
 	if (ret)
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+	ret = __wait_seqno(engine, seqno, reset_counter, true, NULL, file_priv);
 	mutex_lock(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	return i915_gem_object_wait_rendering__tail(obj, engine);
 }
 
 /**
@@ -2149,16 +2149,16 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_engine_cs *ring)
+			       struct intel_engine_cs *engine)
 {
-	u32 seqno = intel_ring_get_seqno(ring);
+	u32 seqno = intel_engine_get_seqno(engine);
 
-	BUG_ON(ring == NULL);
-	if (obj->ring != ring && obj->last_write_seqno) {
+	BUG_ON(engine == NULL);
+	if (obj->ring != engine && obj->last_write_seqno) {
 		/* Keep the seqno relative to the current ring */
 		obj->last_write_seqno = seqno;
 	}
-	obj->ring = ring;
+	obj->ring = engine;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj->active) {
@@ -2166,16 +2166,16 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 		obj->active = 1;
 	}
 
-	list_move_tail(&obj->ring_list, &ring->active_list);
+	list_move_tail(&obj->ring_list, &engine->active_list);
 
 	obj->last_read_seqno = seqno;
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring)
+			     struct intel_engine_cs *engine)
 {
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
-	return i915_gem_object_move_to_active(vma->obj, ring);
+	return i915_gem_object_move_to_active(vma->obj, engine);
 }
 
 static void
@@ -2214,12 +2214,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj)
 {
-	struct intel_engine_cs *ring = obj->ring;
+	struct intel_engine_cs *engine = obj->ring;
 
-	if (ring == NULL)
+	if (engine == NULL)
 		return;
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
+	if (i915_seqno_passed(engine->get_seqno(engine, true),
 			      obj->last_read_seqno))
 		i915_gem_object_move_to_inactive(obj);
 }
@@ -2228,23 +2228,23 @@ static int
 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, j;
 
 	/* Carefully retire all requests without writing to the rings */
-	for_each_ring(ring, dev_priv, i) {
-		ret = intel_ring_idle(ring);
+	for_each_engine(engine, dev_priv, i) {
+		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
 	}
 	i915_gem_retire_requests(dev);
 
 	/* Finally reset hw state */
-	for_each_ring(ring, dev_priv, i) {
-		intel_ring_init_seqno(ring, seqno);
+	for_each_engine(engine, dev_priv, i) {
+		intel_engine_init_seqno(engine, seqno);
 
-		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
-			ring->semaphore.sync_seqno[j] = 0;
+		for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
+			engine->semaphore.sync_seqno[j] = 0;
 	}
 
 	return 0;
@@ -2294,26 +2294,26 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 	return 0;
 }
 
-int __i915_add_request(struct intel_engine_cs *ring,
+int __i915_add_request(struct intel_engine_cs *engine,
 		       struct drm_file *file,
 		       struct drm_i915_gem_object *obj,
 		       u32 *out_seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct drm_i915_gem_request *request;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request = ring->preallocated_lazy_request;
+	request = engine->preallocated_lazy_request;
 	if (WARN_ON(request == NULL))
 		return -ENOMEM;
 
 	if (i915.enable_execlists) {
 		struct intel_context *ctx = request->ctx;
-		ringbuf = ctx->engine[ring->id].ringbuf;
+		ringbuf = ctx->ring[engine->id].ringbuf;
 	} else
-		ringbuf = ring->buffer;
+		ringbuf = engine->buffer;
 
 	request_start = intel_ring_get_tail(ringbuf);
 	/*
@@ -2328,7 +2328,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 		if (ret)
 			return ret;
 	} else {
-		ret = intel_ring_flush_all_caches(ring);
+		ret = intel_engine_flush_all_caches(engine);
 		if (ret)
 			return ret;
 	}
@@ -2341,17 +2341,17 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	request_ring_position = intel_ring_get_tail(ringbuf);
 
 	if (i915.enable_execlists) {
-		ret = ring->emit_request(ringbuf);
+		ret = engine->emit_request(ringbuf);
 		if (ret)
 			return ret;
 	} else {
-		ret = ring->add_request(ring);
+		ret = engine->add_request(engine);
 		if (ret)
 			return ret;
 	}
 
-	request->seqno = intel_ring_get_seqno(ring);
-	request->ring = ring;
+	request->seqno = intel_engine_get_seqno(engine);
+	request->engine = engine;
 	request->head = request_start;
 	request->tail = request_ring_position;
 
@@ -2367,13 +2367,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
 		/* Hold a reference to the current context so that we can inspect
 		 * it later in case a hangcheck error event fires.
 		 */
-		request->ctx = ring->last_context;
+		request->ctx = engine->last_context;
 		if (request->ctx)
 			i915_gem_context_reference(request->ctx);
 	}
 
 	request->emitted_jiffies = jiffies;
-	list_add_tail(&request->list, &ring->request_list);
+	list_add_tail(&request->list, &engine->request_list);
 	request->file_priv = NULL;
 
 	if (file) {
@@ -2386,12 +2386,12 @@ int __i915_add_request(struct intel_engine_cs *ring,
 		spin_unlock(&file_priv->mm.lock);
 	}
 
-	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_seqno = 0;
-	ring->preallocated_lazy_request = NULL;
+	trace_i915_gem_request_add(engine, request->seqno);
+	engine->outstanding_lazy_seqno = 0;
+	engine->preallocated_lazy_request = NULL;
 
 	if (!dev_priv->ums.mm_suspended) {
-		i915_queue_hangcheck(ring->dev);
+		i915_queue_hangcheck(engine->dev);
 
 		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 		queue_delayed_work(dev_priv->wq,
@@ -2475,14 +2475,14 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 }
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	u32 completed_seqno;
 
-	completed_seqno = ring->get_seqno(ring, false);
+	completed_seqno = engine->get_seqno(engine, false);
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, list) {
 		if (i915_seqno_passed(completed_seqno, request->seqno))
 			continue;
 
@@ -2492,32 +2492,32 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
 	return NULL;
 }
 
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-				       struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
+				       struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	bool ring_hung;
 
-	request = i915_gem_find_active_request(ring);
+	request = i915_gem_find_active_request(engine);
 
 	if (request == NULL)
 		return;
 
-	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
 	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
-	list_for_each_entry_continue(request, &ring->request_list, list)
+	list_for_each_entry_continue(request, &engine->request_list, list)
 		i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_engine_cs *engine)
 {
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				       struct drm_i915_gem_object,
 				       ring_list);
 
@@ -2531,20 +2531,20 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * implicit references on things like e.g. ppgtt address spaces through
 	 * the request.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 
 		i915_gem_free_request(request);
 	}
 
-	while (!list_empty(&ring->execlist_queue)) {
+	while (!list_empty(&engine->execlist_queue)) {
 		struct intel_ctx_submit_request *submit_req;
 
-		submit_req = list_first_entry(&ring->execlist_queue,
+		submit_req = list_first_entry(&engine->execlist_queue,
 				struct intel_ctx_submit_request,
 				execlist_link);
 		list_del(&submit_req->execlist_link);
@@ -2554,9 +2554,9 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	}
 
 	/* These may not have been flushed before the reset, do so now */
-	kfree(ring->preallocated_lazy_request);
-	ring->preallocated_lazy_request = NULL;
-	ring->outstanding_lazy_seqno = 0;
+	kfree(engine->preallocated_lazy_request);
+	engine->preallocated_lazy_request = NULL;
+	engine->outstanding_lazy_seqno = 0;
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2583,7 +2583,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
 	/*
@@ -2591,11 +2591,11 @@ void i915_gem_reset(struct drm_device *dev)
 	 * them for finding the guilty party. As the requests only borrow
 	 * their reference to the objects, the inspection must be done first.
 	 */
-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_status(dev_priv, ring);
+	for_each_engine(engine, dev_priv, i)
+		i915_gem_reset_engine_status(dev_priv, engine);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_cleanup(dev_priv, ring);
+	for_each_engine(engine, dev_priv, i)
+		i915_gem_reset_engine_cleanup(dev_priv, engine);
 
 	i915_gem_context_reset(dev);
 
@@ -2606,25 +2606,25 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests__engine(struct intel_engine_cs *engine)
 {
 	uint32_t seqno;
 
-	if (list_empty(&ring->request_list))
+	if (list_empty(&engine->request_list))
 		return;
 
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 
-	seqno = ring->get_seqno(ring, true);
+	seqno = engine->get_seqno(engine, true);
 
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
 	 */
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				      struct drm_i915_gem_object,
 				      ring_list);
 
@@ -2635,18 +2635,18 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	}
 
 
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 		struct intel_ringbuffer *ringbuf;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 
 		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
-		trace_i915_gem_request_retire(ring, request->seqno);
+		trace_i915_gem_request_retire(engine, request->seqno);
 
 		/* This is one of the few common intersection points
 		 * between legacy ringbuffer submission and execlists:
@@ -2655,9 +2655,9 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		 */
 		if (i915.enable_execlists) {
 			struct intel_context *ctx = request->ctx;
-			ringbuf = ctx->engine[ring->id].ringbuf;
+			ringbuf = ctx->ring[engine->id].ringbuf;
 		} else
-			ringbuf = ring->buffer;
+			ringbuf = engine->buffer;
 
 		/* We know the GPU must have read the request to have
 		 * sent us the seqno + interrupt, so use the position
@@ -2669,26 +2669,26 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		i915_gem_free_request(request);
 	}
 
-	if (unlikely(ring->trace_irq_seqno &&
-		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
-		ring->irq_put(ring);
-		ring->trace_irq_seqno = 0;
+	if (unlikely(engine->trace_irq_seqno &&
+		     i915_seqno_passed(seqno, engine->trace_irq_seqno))) {
+		engine->irq_put(engine);
+		engine->trace_irq_seqno = 0;
 	}
 
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 }
 
 bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool idle = true;
 	int i;
 
-	for_each_ring(ring, dev_priv, i) {
-		i915_gem_retire_requests_ring(ring);
-		idle &= list_empty(&ring->request_list);
+	for_each_engine(engine, dev_priv, i) {
+		i915_gem_retire_requests__engine(engine);
+		idle &= list_empty(&engine->request_list);
 	}
 
 	if (idle)
@@ -2742,7 +2742,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests_ring(obj->ring);
+		i915_gem_retire_requests__engine(obj->ring);
 	}
 
 	return 0;
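
The retire loops above stop at the first request whose seqno has not yet
been passed; for reference, i915_seqno_passed() is the usual
wraparound-safe signed comparison:

static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	/* true if seq1 is at or after seq2, modulo u32 wraparound */
	return (s32)(seq1 - seq2) >= 0;
}
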
@@ -2776,7 +2776,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring = NULL;
+	struct intel_engine_cs *engine = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
 	int ret = 0;
@@ -2798,7 +2798,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	if (obj->active) {
 		seqno = obj->last_read_seqno;
-		ring = obj->ring;
+		engine = obj->ring;
 	}
 
 	if (seqno == 0)
@@ -2816,7 +2816,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+	return __wait_seqno(engine, seqno, reset_counter, true, &args->timeout_ns,
 			    file->driver_priv);
 
 out:
@@ -2851,7 +2851,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
 		return i915_gem_object_wait_rendering(obj, false);
 
-	idx = intel_ring_sync_index(from, to);
+	idx = intel_engine_sync_index(from, to);
 
 	seqno = obj->last_read_seqno;
 	/* Optimization: Avoid semaphore sync when we are sure we already
@@ -2965,16 +2965,16 @@ int i915_vma_unbind(struct i915_vma *vma)
 int i915_gpu_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
-	for_each_ring(ring, dev_priv, i) {
-		ret = i915_switch_context(ring, ring->default_context);
+	for_each_engine(engine, dev_priv, i) {
+		ret = i915_switch_context(engine, engine->default_context);
 		if (ret)
 			return ret;
 
-		ret = intel_ring_idle(ring);
+		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
 	}
@@ -4003,7 +4003,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
 	struct drm_i915_gem_request *request;
-	struct intel_engine_cs *ring = NULL;
+	struct intel_engine_cs *engine = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
 	int ret;
@@ -4021,7 +4021,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
-		ring = request->ring;
+		engine = request->engine;
 		seqno = request->seqno;
 	}
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -4030,7 +4030,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+	ret = __wait_seqno(engine, seqno, reset_counter, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4268,8 +4268,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 
 	args->busy = obj->active;
 	if (obj->ring) {
-		BUILD_BUG_ON(I915_NUM_RINGS > 16);
-		args->busy |= intel_ring_flag(obj->ring) << 16;
+		BUILD_BUG_ON(I915_NUM_ENGINES > 16);
+		args->busy |= intel_engine_flag(obj->ring) << 16;
 	}
 
 	drm_gem_object_unreference(&obj->base);
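
The busy ioctl packs the active engine into the high half of the result.
A hypothetical userspace decode (fd and handle assumed; this also assumes
intel_engine_flag() keeps the old intel_ring_flag() encoding of
1 << engine->id):

	struct drm_i915_gem_busy args = { .handle = handle };

	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &args);
	if (args.busy & 1)
		/* bits 16+ carry the engine flag */
		engine_id = ffs(args.busy >> 16) - 1;
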
@@ -4525,11 +4525,11 @@ static void
 i915_gem_stop_ringbuffers(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.stop_ring(ring);
+	for_each_engine(engine, dev_priv, i)
+		dev_priv->gt.stop_ring(engine);
 }
 
 int
@@ -4573,9 +4573,9 @@ err:
 	return ret;
 }
 
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct intel_engine_cs *engine, int slice)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
@@ -4584,7 +4584,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
 	if (!HAS_L3_DPF(dev) || !remap_info)
 		return 0;
 
-	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	ret = intel_ring_begin(engine, GEN7_L3LOG_SIZE / 4 * 3);
 	if (ret)
 		return ret;
 
@@ -4594,12 +4594,12 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
 	 * at initialization time.
 	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, reg_base + i);
-		intel_ring_emit(ring, remap_info[i/4]);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(engine, reg_base + i);
+		intel_ring_emit(engine, remap_info[i/4]);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return ret;
 }
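
The remap loop is a plain instance of the begin/emit/advance pattern used
throughout the driver; reduced to a single register write (reg and value
are placeholders):

	ret = intel_ring_begin(engine, 3);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(engine, reg);	/* register offset */
	intel_ring_emit(engine, value);	/* value to load */
	intel_ring_advance(engine);
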
@@ -4645,55 +4645,55 @@ intel_enable_blt(struct drm_device *dev)
 	return true;
 }
 
-int i915_gem_init_rings(struct drm_device *dev)
+static int i915_gem_init_rings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = intel_init_render_ring_buffer(dev);
+	ret = intel_init_render_engine(dev);
 	if (ret)
 		return ret;
 
 	if (HAS_BSD(dev)) {
-		ret = intel_init_bsd_ring_buffer(dev);
+		ret = intel_init_bsd_engine(dev);
 		if (ret)
-			goto cleanup_render_ring;
+			goto cleanup_render;
 	}
 
 	if (intel_enable_blt(dev)) {
-		ret = intel_init_blt_ring_buffer(dev);
+		ret = intel_init_blt_engine(dev);
 		if (ret)
-			goto cleanup_bsd_ring;
+			goto cleanup_bsd;
 	}
 
 	if (HAS_VEBOX(dev)) {
-		ret = intel_init_vebox_ring_buffer(dev);
+		ret = intel_init_vebox_engine(dev);
 		if (ret)
-			goto cleanup_blt_ring;
+			goto cleanup_blt;
 	}
 
 	if (HAS_BSD2(dev)) {
-		ret = intel_init_bsd2_ring_buffer(dev);
+		ret = intel_init_bsd2_engine(dev);
 		if (ret)
-			goto cleanup_vebox_ring;
+			goto cleanup_vebox;
 	}
 
 	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
 	if (ret)
-		goto cleanup_bsd2_ring;
+		goto cleanup_bsd2;
 
 	return 0;
 
-cleanup_bsd2_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
-cleanup_vebox_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
-cleanup_blt_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
-cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
-cleanup_render_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+cleanup_bsd2:
+	intel_cleanup_engine(&dev_priv->engine[VCS2]);
+cleanup_vebox:
+	intel_cleanup_engine(&dev_priv->engine[VECS]);
+cleanup_blt:
+	intel_cleanup_engine(&dev_priv->engine[BCS]);
+cleanup_bsd:
+	intel_cleanup_engine(&dev_priv->engine[VCS]);
+cleanup_render:
+	intel_cleanup_engine(&dev_priv->engine[RCS]);
 
 	return ret;
 }
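
The relabelled error path keeps the standard unwind shape: every init
that succeeds gains a cleanup step, executed in reverse order on a later
failure. In miniature (init_a/init_b/fini_a are placeholders):

	ret = init_a(dev);
	if (ret)
		return ret;

	ret = init_b(dev);
	if (ret)
		goto cleanup_a;

	return 0;

cleanup_a:
	fini_a(dev);	/* undo init_a only; init_b never took effect */
	return ret;
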
@@ -4733,7 +4733,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		return ret;
 
 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
-		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+		i915_gem_l3_remap(&dev_priv->engine[RCS], i);
 
 	/*
 	 * XXX: Contexts should only be initialized once. Doing a switch to the
@@ -4780,8 +4780,8 @@ int i915_gem_init(struct drm_device *dev)
 	if (!i915.enable_execlists) {
 		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
 		dev_priv->gt.init_rings = i915_gem_init_rings;
-		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
-		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+		dev_priv->gt.cleanup_ring = intel_cleanup_engine;
+		dev_priv->gt.stop_ring = intel_stop_engine;
 	} else {
 		dev_priv->gt.do_execbuf = intel_execlists_submission;
 		dev_priv->gt.init_rings = intel_logical_rings_init;
@@ -4822,11 +4822,11 @@ void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.cleanup_ring(ring);
+	for_each_engine(engine, dev_priv, i)
+		dev_priv->gt.cleanup_ring(engine);
 }
 
 int
@@ -4898,10 +4898,10 @@ i915_gem_lastclose(struct drm_device *dev)
 }
 
 static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_engine_lists(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
 }
 
 void i915_init_vm(struct drm_i915_private *dev_priv,
@@ -4935,8 +4935,8 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		init_ring_lists(&dev_priv->ring[i]);
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		init_engine_lists(&dev_priv->engine[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 9683e62..2dde547 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -291,10 +291,10 @@ void i915_gem_context_reset(struct drm_device *dev)
 
 	/* Prevent the hardware from restoring the last context (which hung) on
 	 * the next switch */
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
-		struct intel_context *dctx = ring->default_context;
-		struct intel_context *lctx = ring->last_context;
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
+		struct intel_context *dctx = engine->default_context;
+		struct intel_context *lctx = engine->last_context;
 
 		/* Do a fake switch to the default context */
 		if (lctx == dctx)
@@ -316,7 +316,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 
 		i915_gem_context_unreference(lctx);
 		i915_gem_context_reference(dctx);
-		ring->last_context = dctx;
+		engine->last_context = dctx;
 	}
 }
 
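When the old and new context may alias, the safe order is to take the new
reference before dropping the old one, as i915_switch_context() does
further down; sketched:

	i915_gem_context_reference(to);		/* new ref first */
	if (engine->last_context)
		i915_gem_context_unreference(engine->last_context);
	engine->last_context = to;

(The reset path above may unreference first only because it has already
excluded lctx == dctx.)
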
@@ -328,7 +328,7 @@ int i915_gem_context_init(struct drm_device *dev)
 
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
-	if (WARN_ON(dev_priv->ring[RCS].default_context))
+	if (WARN_ON(dev_priv->engine[RCS].default_context))
 		return 0;
 
 	if (i915.enable_execlists) {
@@ -351,11 +351,11 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 
 		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = ctx;
+		engine->default_context = ctx;
 	}
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -367,7 +367,7 @@ int i915_gem_context_init(struct drm_device *dev)
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *dctx = dev_priv->engine[RCS].default_context;
 	int i;
 
 	if (dctx->legacy_hw_ctx.rcs_state) {
@@ -382,26 +382,26 @@ void i915_gem_context_fini(struct drm_device *dev)
 		 * to default context. So we need to unreference the base object once
 		 * to offset the do_switch part, so that i915_gem_context_unreference()
 		 * can then free the base object correctly. */
-		WARN_ON(!dev_priv->ring[RCS].last_context);
-		if (dev_priv->ring[RCS].last_context == dctx) {
+		WARN_ON(!dev_priv->engine[RCS].last_context);
+		if (dev_priv->engine[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
 			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
 			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 			i915_gem_context_unreference(dctx);
-			dev_priv->ring[RCS].last_context = NULL;
+			dev_priv->engine[RCS].last_context = NULL;
 		}
 
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 
-		if (ring->last_context)
-			i915_gem_context_unreference(ring->last_context);
+		if (engine->last_context)
+			i915_gem_context_unreference(engine->last_context);
 
-		ring->default_context = NULL;
-		ring->last_context = NULL;
+		engine->default_context = NULL;
+		engine->last_context = NULL;
 	}
 
 	i915_gem_context_unreference(dctx);
@@ -409,17 +409,17 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i;
 
 	/* FIXME: We should make this work, even in reset */
 	if (i915_reset_in_progress(&dev_priv->gpu_error))
 		return 0;
 
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	BUG_ON(!dev_priv->engine[RCS].default_context);
 
-	for_each_ring(ring, dev_priv, i) {
-		ret = i915_switch_context(ring, ring->default_context);
+	for_each_engine(engine, dev_priv, i) {
+		ret = i915_switch_context(engine, engine->default_context);
 		if (ret)
 			return ret;
 	}
@@ -475,7 +475,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 }
 
 static inline int
-mi_set_context(struct intel_engine_cs *ring,
+mi_set_context(struct intel_engine_cs *engine,
 	       struct intel_context *new_context,
 	       u32 hw_flags)
 {
@@ -483,28 +483,28 @@ mi_set_context(struct intel_engine_cs *ring,
 
 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
-	 * explicitly, so we rely on the value at ring init, stored in
+	 * explicitly, so we rely on the value at engine init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev)) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+	if (IS_GEN6(engine->dev)) {
+		ret = engine->flush(engine, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(ring->dev)->gen >= 7)
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+	if (INTEL_INFO(engine->dev)->gen >= 7)
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 	else
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_SET_CONTEXT);
+	intel_ring_emit(engine, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -513,28 +513,28 @@ mi_set_context(struct intel_engine_cs *ring,
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, MI_NOOP);
 
-	if (INTEL_INFO(ring->dev)->gen >= 7)
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+	if (INTEL_INFO(engine->dev)->gen >= 7)
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	else
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return ret;
 }
 
-static int do_switch(struct intel_engine_cs *ring,
+static int do_switch(struct intel_engine_cs *engine,
 		     struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct intel_context *from = ring->last_context;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct intel_context *from = engine->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
 
-	if (from != NULL && ring == &dev_priv->ring[RCS]) {
+	if (from != NULL && engine == &dev_priv->engine[RCS]) {
 		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
 		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
@@ -543,9 +543,9 @@ static int do_switch(struct intel_engine_cs *ring,
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (ring == &dev_priv->ring[RCS]) {
+	if (engine == &dev_priv->engine[RCS]) {
 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(ring->dev), 0);
+					    get_context_alignment(engine->dev), 0);
 		if (ret)
 			return ret;
 	}
@@ -555,15 +555,15 @@ static int do_switch(struct intel_engine_cs *ring,
 	 * evict_everything - as a last ditch gtt defrag effort that also
 	 * switches to the default context. Hence we need to reload from here.
 	 */
-	from = ring->last_context;
+	from = engine->last_context;
 
 	if (to->ppgtt) {
-		ret = to->ppgtt->switch_mm(to->ppgtt, ring, false);
+		ret = to->ppgtt->switch_mm(to->ppgtt, engine, false);
 		if (ret)
 			goto unpin_out;
 	}
 
-	if (ring != &dev_priv->ring[RCS]) {
+	if (engine != &dev_priv->engine[RCS]) {
 		if (from)
 			i915_gem_context_unreference(from);
 		goto done;
@@ -590,7 +590,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
 
-	ret = mi_set_context(ring, to, hw_flags);
+	ret = mi_set_context(engine, to, hw_flags);
 	if (ret)
 		goto unpin_out;
 
@@ -598,7 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
 		if (!(to->remap_slice & (1<<i)))
 			continue;
 
-		ret = i915_gem_l3_remap(ring, i);
+		ret = i915_gem_l3_remap(engine, i);
 		/* If it failed, try again next round */
 		if (ret)
 			DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -614,7 +614,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), engine);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -623,7 +623,7 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * swapped, but there is no way to do that yet.
 		 */
 		from->legacy_hw_ctx.rcs_state->dirty = 1;
-		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != engine);
 
 		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@ -635,10 +635,10 @@ static int do_switch(struct intel_engine_cs *ring,
 
 done:
 	i915_gem_context_reference(to);
-	ring->last_context = to;
+	engine->last_context = to;
 
 	if (uninitialized) {
-		ret = i915_gem_render_state_init(ring);
+		ret = i915_gem_render_state_init(engine);
 		if (ret)
 			DRM_ERROR("init render state: %d\n", ret);
 	}
@@ -646,7 +646,7 @@ done:
 	return 0;
 
 unpin_out:
-	if (ring->id == RCS)
+	if (engine->id == RCS)
 		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
@@ -661,24 +661,24 @@ unpin_out:
  * it will have a refcount > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
  */
-int i915_switch_context(struct intel_engine_cs *ring,
+int i915_switch_context(struct intel_engine_cs *engine,
 			struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (to != ring->last_context) {
+		if (to != engine->last_context) {
 			i915_gem_context_reference(to);
-			if (ring->last_context)
-				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = to;
+			if (engine->last_context)
+				i915_gem_context_unreference(engine->last_context);
+			engine->last_context = to;
 		}
 		return 0;
 	}
 
-	return do_switch(ring, to);
+	return do_switch(engine, to);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
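
Callers use i915_switch_context() as the single entry point whether or
not a real MI_SET_CONTEXT is required; e.g. the idle path seen earlier:

	ret = i915_switch_context(engine, engine->default_context);
	if (ret)
		return ret;
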
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c9016c4..cbdae18 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -521,7 +521,7 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_engine_cs *ring,
+				struct intel_engine_cs *engine,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -610,7 +610,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 }
 
 static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 			    struct list_head *vmas,
 			    bool *need_relocs)
 {
@@ -618,10 +618,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	struct i915_vma *vma;
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests__engine(engine);
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
@@ -676,7 +676,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (eb_vma_misplaced(vma))
 				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma, engine, need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -686,7 +686,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, engine, need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -709,7 +709,7 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
-				  struct intel_engine_cs *ring,
+				  struct intel_engine_cs *engine,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec)
 {
@@ -797,7 +797,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -822,7 +822,7 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *engine,
 				struct list_head *vmas)
 {
 	struct i915_vma *vma;
@@ -832,7 +832,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
-		ret = i915_gem_object_sync(obj, ring);
+		ret = i915_gem_object_sync(obj, engine);
 		if (ret)
 			return ret;
 
@@ -843,7 +843,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(engine->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -851,7 +851,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_engine_invalidate_all_caches(engine);
 }
 
 static bool
@@ -913,12 +913,12 @@ validate_exec_list(struct drm_device *dev,
 
 static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_engine_cs *ring, const u32 ctx_id)
+			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);
 
 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -931,8 +931,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}
 
-	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_create(ctx, ring);
+	if (i915.enable_execlists && !ctx->ring[engine->id].state) {
+		int ret = intel_lr_context_deferred_create(ctx, engine);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);
@@ -944,9 +944,9 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 
 void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-				   struct intel_engine_cs *ring)
+				   struct intel_engine_cs *engine)
 {
-	u32 seqno = intel_ring_get_seqno(ring);
+	u32 seqno = intel_engine_get_seqno(engine);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -960,12 +960,12 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, ring);
+		i915_vma_move_to_active(vma, engine);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = seqno;
 
-			intel_fb_obj_invalidate(obj, ring);
+			intel_fb_obj_invalidate(obj, engine);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -973,7 +973,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			obj->last_fenced_seqno = seqno;
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				struct drm_i915_private *dev_priv = to_i915(engine->dev);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
@@ -986,45 +986,45 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 void
 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct drm_file *file,
-				    struct intel_engine_cs *ring,
+				    struct intel_engine_cs *engine,
 				    struct drm_i915_gem_object *obj)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	(void)__i915_add_request(ring, file, obj, NULL);
+	(void)__i915_add_request(engine, file, obj, NULL);
 }
 
 static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct intel_engine_cs *ring)
+			    struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(ring, 4 * 3);
+	ret = intel_ring_begin(engine, 4 * 3);
 	if (ret)
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(engine, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(engine, 0);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
 
 static int
-i915_emit_box(struct intel_engine_cs *ring,
+i915_emit_box(struct intel_engine_cs *engine,
 	      struct drm_clip_rect *box,
 	      int DR1, int DR4)
 {
@@ -1037,28 +1037,28 @@ i915_emit_box(struct intel_engine_cs *ring,
 		return -EINVAL;
 	}
 
-	if (INTEL_INFO(ring->dev)->gen >= 4) {
-		ret = intel_ring_begin(ring, 4);
+	if (INTEL_INFO(engine->dev)->gen >= 4) {
+		ret = intel_ring_begin(engine, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
-		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
-		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
-		intel_ring_emit(ring, DR4);
+		intel_ring_emit(engine, GFX_OP_DRAWRECT_INFO_I965);
+		intel_ring_emit(engine, (box->x1 & 0xffff) | box->y1 << 16);
+		intel_ring_emit(engine, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+		intel_ring_emit(engine, DR4);
 	} else {
-		ret = intel_ring_begin(ring, 6);
+		ret = intel_ring_begin(engine, 6);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
-		intel_ring_emit(ring, DR1);
-		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
-		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
-		intel_ring_emit(ring, DR4);
-		intel_ring_emit(ring, 0);
+		intel_ring_emit(engine, GFX_OP_DRAWRECT_INFO);
+		intel_ring_emit(engine, DR1);
+		intel_ring_emit(engine, (box->x1 & 0xffff) | box->y1 << 16);
+		intel_ring_emit(engine, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+		intel_ring_emit(engine, DR4);
+		intel_ring_emit(engine, 0);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
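
The DRAWRECT payload packs each corner as x in bits 15:0 and y in bits
31:16, with the bottom-right corner made inclusive by the minus one;
spelled out:

	u32 top_left     = (box->x1 & 0xffff) | (box->y1 << 16);
	u32 bottom_right = ((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16);
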
@@ -1066,7 +1066,7 @@ i915_emit_box(struct intel_engine_cs *ring,
 
 int
 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
+			       struct intel_engine_cs *engine,
 			       struct intel_context *ctx,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas,
@@ -1081,7 +1081,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	int i, ret = 0;
 
 	if (args->num_cliprects != 0) {
-		if (ring != &dev_priv->ring[RCS]) {
+		if (engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
 			return -EINVAL;
 		}
@@ -1123,11 +1123,11 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(engine, vmas);
 	if (ret)
 		goto error;
 
-	ret = i915_switch_context(ring, ctx);
+	ret = i915_switch_context(engine, ctx);
 	if (ret)
 		goto error;
 
@@ -1137,7 +1137,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			ret = -EINVAL;
 			goto error;
@@ -1168,23 +1168,23 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 		goto error;
 	}
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->engine[RCS] &&
 			instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(engine, 4);
 		if (ret)
 			goto error;
 
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(engine, INSTPM);
+		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+		intel_ring_advance(engine);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		ret = i915_reset_gen7_sol_offsets(dev, engine);
 		if (ret)
 			goto error;
 	}
@@ -1192,29 +1192,29 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	exec_len = args->batch_len;
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(ring, &cliprects[i],
+			ret = i915_emit_box(engine, &cliprects[i],
 					    args->DR1, args->DR4);
 			if (ret)
 				goto error;
 
-			ret = ring->dispatch_execbuffer(ring,
+			ret = engine->dispatch_execbuffer(engine,
 							exec_start, exec_len,
 							flags);
 			if (ret)
 				goto error;
 		}
 	} else {
-		ret = ring->dispatch_execbuffer(ring,
+		ret = engine->dispatch_execbuffer(engine,
 						exec_start, exec_len,
 						flags);
 		if (ret)
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+	trace_i915_gem_ring_dispatch(engine, intel_engine_get_seqno(engine), flags);
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, engine);
+	i915_gem_execbuffer_retire_commands(dev, file, engine, batch_obj);
 
 error:
 	kfree(cliprects);
@@ -1225,15 +1225,15 @@ error:
  * Find one BSD ring to dispatch the corresponding BSD command.
  * The Ring ID is returned.
  */
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
+static int gen8_dispatch_bsd_engine(struct drm_device *dev,
 				  struct drm_file *file)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Check whether the file_priv is using one ring */
-	if (file_priv->bsd_ring)
-		return file_priv->bsd_ring->id;
+	if (file_priv->bsd_engine)
+		return file_priv->bsd_engine->id;
 	else {
 		/* If no, use the ping-pong mechanism to select one ring */
 		int ring_id;
@@ -1246,7 +1246,7 @@ static int gen8_dispatch_bsd_ring(struct drm_device *dev,
 			ring_id = VCS2;
 			dev_priv->mm.bsd_ring_dispatch_index = 0;
 		}
-		file_priv->bsd_ring = &dev_priv->ring[ring_id];
+		file_priv->bsd_engine = &dev_priv->engine[ring_id];
 		mutex_unlock(&dev->struct_mutex);
 		return ring_id;
 	}
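
The ping-pong selector is a per-file sticky round-robin over the two BSD
engines; a sketch of the branch elided above, under the same struct_mutex
locking:

		mutex_lock(&dev->struct_mutex);
		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
			ring_id = VCS;
			dev_priv->mm.bsd_ring_dispatch_index = 1;
		} else {
			ring_id = VCS2;
			dev_priv->mm.bsd_ring_dispatch_index = 0;
		}
		file_priv->bsd_engine = &dev_priv->engine[ring_id];
		mutex_unlock(&dev->struct_mutex);
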
@@ -1280,7 +1280,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
@@ -1313,18 +1313,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
-		ring = &dev_priv->ring[RCS];
+		engine = &dev_priv->engine[RCS];
 	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
 		if (HAS_BSD2(dev)) {
 			int ring_id;
-			ring_id = gen8_dispatch_bsd_ring(dev, file);
-			ring = &dev_priv->ring[ring_id];
+			ring_id = gen8_dispatch_bsd_engine(dev, file);
+			engine = &dev_priv->engine[ring_id];
 		} else
-			ring = &dev_priv->ring[VCS];
+			engine = &dev_priv->engine[VCS];
 	} else
-		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+		engine = &dev_priv->engine[(args->flags & I915_EXEC_RING_MASK) - 1];
 
-	if (!intel_ring_initialized(ring)) {
+	if (!intel_engine_initialized(engine)) {
 		DRM_DEBUG("execbuf with invalid ring: %d\n",
 			  (int)(args->flags & I915_EXEC_RING_MASK));
 		return -EINVAL;
@@ -1347,7 +1347,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
-	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = PTR_ERR(ctx);
@@ -1379,7 +1379,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1388,7 +1388,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, engine,
 								eb, exec);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
@@ -1404,8 +1404,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
-	if (i915_needs_cmd_parser(ring)) {
-		ret = i915_parse_cmds(ring,
+	if (i915_needs_cmd_parser(engine)) {
+		ret = i915_parse_cmds(engine,
 				      batch_obj,
 				      args->batch_start_offset,
 				      file->is_master);
@@ -1444,7 +1444,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	} else
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-	ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
+	ret = dev_priv->gt.do_execbuf(dev, file, engine, ctx, args,
 				      &eb->vmas, batch_obj, exec_start, flags);
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4db2370..8574cb8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -203,37 +203,37 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
+static int gen8_write_pdp(struct intel_engine_cs *engine, unsigned entry,
 			   uint64_t val, bool synchronous)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	int ret;
 
 	BUG_ON(entry >= 4);
 
 	if (synchronous) {
-		I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
-		I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
+		I915_WRITE(GEN8_RING_PDP_UDW(engine, entry), val >> 32);
+		I915_WRITE(GEN8_RING_PDP_LDW(engine, entry), (u32)val);
 		return 0;
 	}
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
-	intel_ring_emit(ring, (u32)(val >> 32));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
-	intel_ring_emit(ring, (u32)(val));
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(engine, GEN8_RING_PDP_UDW(engine, entry));
+	intel_ring_emit(engine, (u32)(val >> 32));
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(engine, GEN8_RING_PDP_LDW(engine, entry));
+	intel_ring_emit(engine, (u32)(val));
+	intel_ring_advance(engine);
 
 	return 0;
 }
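
gen8_write_pdp() takes two routes: direct MMIO when the caller needs the
update applied immediately (synchronous), or an LRI pair when the write
must be ordered against other commands in the ring. Either way the 64-bit
descriptor splits into two dword registers:

	u32 udw = val >> 32;	/* -> GEN8_RING_PDP_UDW(engine, entry) */
	u32 ldw = (u32)val;	/* -> GEN8_RING_PDP_LDW(engine, entry) */
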
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring,
+			  struct intel_engine_cs *engine,
 			  bool synchronous)
 {
 	int i, ret;
@@ -243,7 +243,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	for (i = used_pd - 1; i >= 0; i--) {
 		dma_addr_t addr = ppgtt->pd_dma_addr[i];
-		ret = gen8_write_pdp(ring, i, addr, synchronous);
+		ret = gen8_write_pdp(engine, i, addr, synchronous);
 		if (ret)
 			return ret;
 	}
@@ -708,7 +708,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring,
+			 struct intel_engine_cs *engine,
 			 bool synchronous)
 {
 	struct drm_device *dev = ppgtt->base.dev;
@@ -725,34 +725,34 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (synchronous ||
 	    i915_reset_in_progress(&dev_priv->gpu_error)) {
 		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-		POSTING_READ(RING_PP_DIR_BASE(ring));
+		I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+		I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
+		POSTING_READ(RING_PP_DIR_BASE(engine));
 		return 0;
 	}
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(engine, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit(engine, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(engine, PP_DIR_DCLV_2G);
+	intel_ring_emit(engine, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(engine, get_pd_offset(ppgtt));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring,
+			  struct intel_engine_cs *engine,
 			  bool synchronous)
 {
 	struct drm_device *dev = ppgtt->base.dev;
@@ -769,32 +769,32 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (synchronous ||
 	    i915_reset_in_progress(&dev_priv->gpu_error)) {
 		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-		POSTING_READ(RING_PP_DIR_BASE(ring));
+		I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+		I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
+		POSTING_READ(RING_PP_DIR_BASE(engine));
 		return 0;
 	}
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(engine, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit(engine, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(engine, PP_DIR_DCLV_2G);
+	intel_ring_emit(engine, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(engine, get_pd_offset(ppgtt));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
-	if (ring->id != RCS) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (engine->id != RCS) {
+		ret = engine->flush(engine, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -803,7 +803,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring,
+			  struct intel_engine_cs *engine,
 			  bool synchronous)
 {
 	struct drm_device *dev = ppgtt->base.dev;
@@ -812,10 +812,10 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (!synchronous)
 		return 0;
 
-	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
 
-	POSTING_READ(RING_PP_DIR_DCLV(ring));
+	POSTING_READ(RING_PP_DIR_DCLV(engine));
 
 	return 0;
 }
@@ -823,7 +823,7 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int j;
 
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
@@ -832,8 +832,8 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
 	if (i915.enable_execlists)
 		return;
 
-	for_each_ring(ring, dev_priv, j) {
-		I915_WRITE(RING_MODE_GEN7(ring),
+	for_each_engine(engine, dev_priv, j) {
+		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 	}
 }
@@ -841,7 +841,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
 static void gen7_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t ecochk, ecobits;
 	int i;
 
@@ -857,9 +857,9 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 	}
 	I915_WRITE(GAM_ECOCHK, ecochk);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv, i) {
 		/* GFX_MODE is per-ring on gen7+ */
-		I915_WRITE(RING_MODE_GEN7(ring),
+		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 	}
 }
@@ -1171,7 +1171,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 	int i, ret = 0;
 
@@ -1188,8 +1188,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 		WARN_ON(1);
 
 	if (ppgtt) {
-		for_each_ring(ring, dev_priv, i) {
-			ret = ppgtt->switch_mm(ppgtt, ring, true);
+		for_each_engine(engine, dev_priv, i) {
+			ret = ppgtt->switch_mm(ppgtt, engine, true);
 			if (ret != 0)
 				return ret;
 		}
@@ -1296,15 +1296,15 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 void i915_check_and_clear_faults(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv, i) {
 		u32 fault_reg;
-		fault_reg = I915_READ(RING_FAULT_REG(ring));
+		fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (fault_reg & RING_FAULT_VALID) {
 			DRM_DEBUG_DRIVER("Unexpected fault\n"
 					 "\tAddr: 0x%08lx\\n"
@@ -1315,11 +1315,11 @@ void i915_check_and_clear_faults(struct drm_device *dev)
 					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
 					 RING_FAULT_SRCID(fault_reg),
 					 RING_FAULT_FAULT_TYPE(fault_reg));
-			I915_WRITE(RING_FAULT_REG(ring),
+			I915_WRITE(RING_FAULT_REG(engine),
 				   fault_reg & ~RING_FAULT_VALID);
 		}
 	}
-	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+	POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
 }
 
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 1e05414..285d72d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -220,7 +220,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 	}
 }
 
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 {
 	switch (a) {
 	case HANGCHECK_IDLE:
@@ -408,7 +408,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 		obj = error->ring[i].batchbuffer;
 		if (obj) {
-			err_puts(m, dev_priv->ring[i].name);
+			err_puts(m, dev_priv->engine[i].name);
 			if (error->ring[i].pid != -1)
 				err_printf(m, " (submitted by %s [%d])",
 					   error->ring[i].comm,
@@ -421,13 +421,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		obj = error->ring[i].wa_batchbuffer;
 		if (obj) {
 			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
-				   dev_priv->ring[i].name, obj->gtt_offset);
+				   dev_priv->engine[i].name, obj->gtt_offset);
 			print_error_obj(m, obj);
 		}
 
 		if (error->ring[i].num_requests) {
 			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   error->ring[i].num_requests);
 			for (j = 0; j < error->ring[i].num_requests; j++) {
 				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
@@ -439,14 +439,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		if ((obj = error->ring[i].ringbuffer)) {
 			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   obj->gtt_offset);
 			print_error_obj(m, obj);
 		}
 
 		if ((obj = error->ring[i].hws_page)) {
 			err_printf(m, "%s --- HW Status = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   obj->gtt_offset);
 			offset = 0;
 			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
@@ -462,7 +462,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		if ((obj = error->ring[i].ctx)) {
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   obj->gtt_offset);
 			print_error_obj(m, obj);
 		}
@@ -743,7 +743,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 	 * synchronization commands which almost always appear in cases that
 	 * are strictly a client bug. Use instdone to differentiate somewhat.
 	 */
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
 			if (ring_id)
 				*ring_id = i;
@@ -791,7 +791,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
 static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 					struct drm_i915_error_state *error,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
 	struct intel_engine_cs *to;
@@ -806,68 +806,68 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 						 dev_priv->semaphore_obj,
 						 &dev_priv->gtt.base);
 
-	for_each_ring(to, dev_priv, i) {
+	for_each_engine(to, dev_priv, i) {
 		int idx;
 		u16 signal_offset;
 		u32 *tmp;
 
-		if (ring == to)
+		if (engine == to)
 			continue;
 
-		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+		signal_offset = (GEN8_SIGNAL_OFFSET(engine, i) & (PAGE_SIZE - 1))
 				/ 4;
 		tmp = error->semaphore_obj->pages[0];
-		idx = intel_ring_sync_index(ring, to);
+		idx = intel_engine_sync_index(engine, to);
 
 		ering->semaphore_mboxes[idx] = tmp[signal_offset];
-		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
 	}
 }
 
 static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
-	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
-	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
-	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
-	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
 	if (HAS_VEBOX(dev_priv->dev)) {
 		ering->semaphore_mboxes[2] =
-			I915_READ(RING_SYNC_2(ring->mmio_base));
-		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+			I915_READ(RING_SYNC_2(engine->mmio_base));
+		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
 	}
 }
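
intel_engine_sync_index() (the renamed intel_ring_sync_index()) maps an
(engine, other) pair onto a per-engine mailbox slot, rotating so that an
engine never indexes itself; roughly:

static inline int
intel_engine_sync_index(struct intel_engine_cs *engine,
			struct intel_engine_cs *other)
{
	/* rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2; and so on */
	int idx = (other - engine) - 1;

	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}
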
 
 static void i915_record_ring_state(struct drm_device *dev,
 				   struct drm_i915_error_state *error,
-				   struct intel_engine_cs *ring,
+				   struct intel_engine_cs *engine,
 				   struct drm_i915_error_ring *ering)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
-		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+		ering->rc_psmi = I915_READ(engine->mmio_base + 0x50);
+		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (INTEL_INFO(dev)->gen >= 8)
-			gen8_record_semaphore_state(dev_priv, error, ring, ering);
+			gen8_record_semaphore_state(dev_priv, error, engine, ering);
 		else
-			gen6_record_semaphore_state(dev_priv, ring, ering);
+			gen6_record_semaphore_state(dev_priv, engine, ering);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
-		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
-		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
-		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
-		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
 		if (INTEL_INFO(dev)->gen >= 8) {
-			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
-			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
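+			/* Gen8 extends these to 48-bit addresses; fold in the
+			 * upper-dword registers. */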
+			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
 		}
-		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
 	} else {
 		ering->faddr = I915_READ(DMA_FADD_I8XX);
 		ering->ipeir = I915_READ(IPEIR);
@@ -875,19 +875,19 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->instdone = I915_READ(INSTDONE);
 	}
 
-	ering->waiting = waitqueue_active(&ring->irq_queue);
-	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
-	ering->seqno = ring->get_seqno(ring, false);
-	ering->acthd = intel_ring_get_active_head(ring);
-	ering->head = I915_READ_HEAD(ring);
-	ering->tail = I915_READ_TAIL(ring);
-	ering->ctl = I915_READ_CTL(ring);
+	ering->waiting = waitqueue_active(&engine->irq_queue);
+	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+	ering->seqno = engine->get_seqno(engine, false);
+	ering->acthd = intel_engine_get_active_head(engine);
+	ering->head = I915_READ_HEAD(engine);
+	ering->tail = I915_READ_TAIL(engine);
+	ering->ctl = I915_READ_CTL(engine);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		int mmio;
 
 		if (IS_GEN7(dev)) {
-			switch (ring->id) {
+			switch (engine->id) {
 			default:
 			case RCS:
 				mmio = RENDER_HWS_PGA_GEN7;
@@ -902,59 +902,59 @@ static void i915_record_ring_state(struct drm_device *dev,
 				mmio = VEBOX_HWS_PGA_GEN7;
 				break;
 			}
-		} else if (IS_GEN6(ring->dev)) {
-			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+		} else if (IS_GEN6(engine->dev)) {
+			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 		} else {
 			/* XXX: gen8 returns to sanity */
-			mmio = RING_HWS_PGA(ring->mmio_base);
+			mmio = RING_HWS_PGA(engine->mmio_base);
 		}
 
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->cpu_ring_head = ring->buffer->head;
-	ering->cpu_ring_tail = ring->buffer->tail;
+	ering->cpu_ring_head = engine->buffer->head;
+	ering->cpu_ring_tail = engine->buffer->tail;
 
-	ering->hangcheck_score = ring->hangcheck.score;
-	ering->hangcheck_action = ring->hangcheck.action;
+	ering->hangcheck_score = engine->hangcheck.score;
+	ering->hangcheck_action = engine->hangcheck.action;
 
 	if (USES_PPGTT(dev)) {
 		int i;
 
-		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
 		switch (INTEL_INFO(dev)->gen) {
 		case 8:
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
-					I915_READ(GEN8_RING_PDP_UDW(ring, i));
+					I915_READ(GEN8_RING_PDP_UDW(engine, i));
 				ering->vm_info.pdp[i] <<= 32;
 				ering->vm_info.pdp[i] |=
-					I915_READ(GEN8_RING_PDP_LDW(ring, i));
+					I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			}
 			break;
 		case 7:
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE(ring));
+				I915_READ(RING_PP_DIR_BASE(engine));
 			break;
 		case 6:
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE_READ(ring));
+				I915_READ(RING_PP_DIR_BASE_READ(engine));
 			break;
 		}
 	}
 }
 
 
-static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 					   struct drm_i915_error_state *error,
 					   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* Currently render ring is the only HW context user */
-	if (ring->id != RCS || !error->ccid)
+	if (engine->id != RCS || !error->ccid)
 		return;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -975,19 +975,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
 	struct drm_i915_gem_request *request;
 	int i, count;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 
 		error->ring[i].pid = -1;
 
-		if (ring->dev == NULL)
+		if (engine->dev == NULL)
 			continue;
 
 		error->ring[i].valid = true;
 
-		i915_record_ring_state(dev, error, ring, &error->ring[i]);
+		i915_record_ring_state(dev, error, engine, &error->ring[i]);
 
-		request = i915_gem_find_active_request(ring);
+		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
 
@@ -1007,7 +1007,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
 				error->ring[i].wa_batchbuffer =
 					i915_error_ggtt_object_create(dev_priv,
-							     ring->scratch.obj);
+							     engine->scratch.obj);
 
 			if (request->file_priv) {
 				struct task_struct *task;
@@ -1024,15 +1024,15 @@ static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		error->ring[i].ringbuffer =
-			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
+			i915_error_ggtt_object_create(dev_priv, engine->buffer->obj);
 
 		error->ring[i].hws_page =
-			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+			i915_error_ggtt_object_create(dev_priv, engine->status_page.obj);
 
-		i915_gem_record_active_context(ring, error, &error->ring[i]);
+		i915_gem_record_active_context(engine, error, &error->ring[i]);
 
 		count = 0;
-		list_for_each_entry(request, &ring->request_list, list)
+		list_for_each_entry(request, &engine->request_list, list)
 			count++;
 
 		error->ring[i].num_requests = count;
@@ -1045,7 +1045,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		count = 0;
-		list_for_each_entry(request, &ring->request_list, list) {
+		list_for_each_entry(request, &engine->request_list, list) {
 			struct drm_i915_error_request *erq;
 
 			erq = &error->ring[i].requests[count++];
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0dca371..2973c00 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1256,17 +1256,17 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 }
 
 static void notify_ring(struct drm_device *dev,
-			struct intel_engine_cs *ring)
+			struct intel_engine_cs *engine)
 {
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	trace_i915_gem_request_complete(ring);
+	trace_i915_gem_request_complete(engine);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		intel_notify_mmio_flip(ring);
+		intel_notify_mmio_flip(engine);
 
-	wake_up_all(&ring->irq_queue);
+	wake_up_all(&engine->irq_queue);
 	i915_queue_hangcheck(dev);
 }
 
@@ -1584,9 +1584,9 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
 {
 	if (gt_iir &
 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-		notify_ring(dev, &dev_priv->ring[RCS]);
+		notify_ring(dev, &dev_priv->engine[RCS]);
 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->ring[VCS]);
+		notify_ring(dev, &dev_priv->engine[VCS]);
 }
 
 static void snb_gt_irq_handler(struct drm_device *dev,
@@ -1596,11 +1596,11 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 
 	if (gt_iir &
 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-		notify_ring(dev, &dev_priv->ring[RCS]);
+		notify_ring(dev, &dev_priv->engine[RCS]);
 	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->ring[VCS]);
+		notify_ring(dev, &dev_priv->engine[VCS]);
 	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->ring[BCS]);
+		notify_ring(dev, &dev_priv->engine[BCS]);
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
@@ -1630,7 +1630,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 				       struct drm_i915_private *dev_priv,
 				       u32 master_ctl)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 rcs, bcs, vcs;
 	uint32_t tmp = 0;
 	irqreturn_t ret = IRQ_NONE;
@@ -1642,18 +1642,18 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 			ret = IRQ_HANDLED;
 
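+			/* Each GT IIR dword carries two engines' interrupt
+			 * bits, 16 bits apiece; shift the relevant lane down
+			 * before testing it. */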
 			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
-			ring = &dev_priv->ring[RCS];
+			engine = &dev_priv->engine[RCS];
 			if (rcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, ring);
+				notify_ring(dev, engine);
 			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				intel_execlists_handle_ctx_events(ring);
+				intel_execlists_handle_ctx_events(engine);
 
 			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
-			ring = &dev_priv->ring[BCS];
+			engine = &dev_priv->engine[BCS];
 			if (bcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, ring);
+				notify_ring(dev, engine);
 			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				intel_execlists_handle_ctx_events(ring);
+				intel_execlists_handle_ctx_events(engine);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
@@ -1665,18 +1665,18 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 			ret = IRQ_HANDLED;
 
 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
-			ring = &dev_priv->ring[VCS];
+			engine = &dev_priv->engine[VCS];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, ring);
+				notify_ring(dev, engine);
 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				intel_execlists_handle_ctx_events(ring);
+				intel_execlists_handle_ctx_events(engine);
 
 			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
-			ring = &dev_priv->ring[VCS2];
+			engine = &dev_priv->engine[VCS2];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, ring);
+				notify_ring(dev, engine);
 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				intel_execlists_handle_ctx_events(ring);
+				intel_execlists_handle_ctx_events(engine);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
@@ -1699,11 +1699,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 			ret = IRQ_HANDLED;
 
 			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
-			ring = &dev_priv->ring[VECS];
+			engine = &dev_priv->engine[VECS];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, ring);
+				notify_ring(dev, engine);
 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				intel_execlists_handle_ctx_events(ring);
+				intel_execlists_handle_ctx_events(engine);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
@@ -2000,7 +2000,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 	if (HAS_VEBOX(dev_priv->dev)) {
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+			notify_ring(dev_priv->dev, &dev_priv->engine[VECS]);
 
 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
 			i915_handle_error(dev_priv->dev, false,
@@ -2629,7 +2629,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
 	/*
@@ -2640,8 +2640,8 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 	 */
 
 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-	for_each_ring(ring, dev_priv, i)
-		wake_up_all(&ring->irq_queue);
+	for_each_engine(engine, dev_priv, i)
+		wake_up_all(&engine->irq_queue);
 
 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
 	wake_up_all(&dev_priv->pending_flip_queue);
@@ -3055,17 +3055,17 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 }
 
 static u32
-ring_last_seqno(struct intel_engine_cs *ring)
+engine_last_seqno(struct intel_engine_cs *engine)
 {
-	return list_entry(ring->request_list.prev,
+	return list_entry(engine->request_list.prev,
 			  struct drm_i915_gem_request, list)->seqno;
 }
 
 static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+engine_idle(struct intel_engine_cs *engine, u32 seqno)
 {
-	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(seqno, ring_last_seqno(ring)));
+	return (list_empty(&engine->request_list) ||
+		i915_seqno_passed(seqno, engine_last_seqno(engine)));
 }
 
 static bool
@@ -3081,48 +3081,48 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
 }
 
 static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+semaphore_wait_to_signaller_engine(struct intel_engine_cs *engine, u32 ipehr, u64 offset)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	int i;
 
 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-		for_each_ring(signaller, dev_priv, i) {
-			if (ring == signaller)
+		for_each_engine(signaller, dev_priv, i) {
+			if (engine == signaller)
 				continue;
 
-			if (offset == signaller->semaphore.signal_ggtt[ring->id])
+			if (offset == signaller->semaphore.signal_ggtt[engine->id])
 				return signaller;
 		}
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
-		for_each_ring(signaller, dev_priv, i) {
-			if(ring == signaller)
+		for_each_engine(signaller, dev_priv, i) {
+			if (engine == signaller)
 				continue;
 
-			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
 				return signaller;
 		}
 	}
 
 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-		  ring->id, ipehr, offset);
+		  engine->id, ipehr, offset);
 
 	return NULL;
 }
 
 static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
 
-	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+	if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
 		return NULL;
 
 	/*
@@ -3133,8 +3133,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	 * point at a batch, and semaphores are always emitted into the
 	 * ringbuffer itself.
 	 */
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
+	head = I915_READ_HEAD(engine) & HEAD_ADDR;
+	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -3142,10 +3142,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 		 * our ring is smaller than what the hardware (and hence
 		 * HEAD_ADDR) allows. Also handles wrap-around.
 		 */
-		head &= ring->buffer->size - 1;
+		head &= engine->buffer->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(ring->buffer->virtual_start + head);
+		cmd = ioread32(engine->buffer->virtual_start + head);
 		if (cmd == ipehr)
 			break;
 
@@ -3155,29 +3155,29 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		offset = ioread32(ring->buffer->virtual_start + head + 12);
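+	/* The dword after the wait command holds the semaphore value; +1
+	 * recovers the request seqno being waited for. */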
+	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
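+	/* Gen8 waits on a 64-bit GGTT address: high dword at +12, low at +8. */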
+	if (INTEL_INFO(engine->dev)->gen >= 8) {
+		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
-		offset = ioread32(ring->buffer->virtual_start + head + 8);
+		offset |= ioread32(engine->buffer->virtual_start + head + 8);
 	}
-	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
+	return semaphore_wait_to_signaller_engine(engine, ipehr, offset);
 }
 
-static int semaphore_passed(struct intel_engine_cs *ring)
+static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	u32 seqno;
 
-	ring->hangcheck.deadlock++;
+	engine->hangcheck.deadlock++;
 
-	signaller = semaphore_waits_for(ring, &seqno);
+	signaller = semaphore_waits_for(engine, &seqno);
 	if (signaller == NULL)
 		return -1;
 
 	/* Prevent pathological recursion due to driver bugs */
-	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
 		return -1;
 
 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
@@ -3193,23 +3193,23 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
-		ring->hangcheck.deadlock = 0;
+	for_each_engine(engine, dev_priv, i)
+		engine->hangcheck.deadlock = 0;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (acthd != ring->hangcheck.acthd) {
-		if (acthd > ring->hangcheck.max_acthd) {
-			ring->hangcheck.max_acthd = acthd;
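+	/* ACTHD only counts as progress while it advances past its previous
+	 * maximum; merely revisiting old addresses suggests a busy loop. */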
+	if (acthd != engine->hangcheck.acthd) {
+		if (acthd > engine->hangcheck.max_acthd) {
+			engine->hangcheck.max_acthd = acthd;
 			return HANGCHECK_ACTIVE;
 		}
 
@@ -3224,24 +3224,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 	 * and break the hang. This should work on
 	 * all but the second generation chipsets.
 	 */
-	tmp = I915_READ_CTL(ring);
+	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
 		i915_handle_error(dev, false,
 				  "Kicking stuck wait on %s",
-				  ring->name);
-		I915_WRITE_CTL(ring, tmp);
+				  engine->name);
+		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
-		switch (semaphore_passed(ring)) {
+		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
 			i915_handle_error(dev, false,
 					  "Kicking stuck semaphore on %s",
-					  ring->name);
-			I915_WRITE_CTL(ring, tmp);
+					  engine->name);
+			I915_WRITE_CTL(engine, tmp);
 			return HANGCHECK_KICK;
 		case 0:
 			return HANGCHECK_WAIT;
@@ -3253,7 +3253,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 
 /**
  * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
+ * batchbuffers in a long time. We keep track of per-engine seqno progress
+ * and if there is no progress, the hangcheck score for that engine is
+ * increased. Further, acthd is inspected to see if the engine is stuck,
+ * and if so we kick it. If we see no progress on three subsequent calls
@@ -3263,10 +3263,10 @@ static void i915_hangcheck_elapsed(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int i;
 	int busy_count = 0, rings_hung = 0;
-	bool stuck[I915_NUM_RINGS] = { 0 };
+	bool stuck[I915_NUM_ENGINES] = { 0 };
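+/* Scoring weights: mere busyness, a kick we had to give, an outright hang. */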
 #define BUSY 1
 #define KICK 5
 #define HUNG 20
@@ -3274,33 +3274,33 @@ static void i915_hangcheck_elapsed(unsigned long data)
 	if (!i915.enable_hangcheck)
 		return;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv, i) {
 		u64 acthd;
 		u32 seqno;
 		bool busy = true;
 
 		semaphore_clear_deadlocks(dev_priv);
 
-		seqno = ring->get_seqno(ring, false);
-		acthd = intel_ring_get_active_head(ring);
+		seqno = engine->get_seqno(engine, false);
+		acthd = intel_engine_get_active_head(engine);
 
-		if (ring->hangcheck.seqno == seqno) {
-			if (ring_idle(ring, seqno)) {
-				ring->hangcheck.action = HANGCHECK_IDLE;
+		if (engine->hangcheck.seqno == seqno) {
+			if (engine_idle(engine, seqno)) {
+				engine->hangcheck.action = HANGCHECK_IDLE;
 
-				if (waitqueue_active(&ring->irq_queue)) {
+				if (waitqueue_active(&engine->irq_queue)) {
 					/* Issue a wake-up to catch stuck h/w. */
-					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
-						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+					if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
+						if (!(dev_priv->gpu_error.test_irq_rings & intel_engine_flag(engine)))
 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-								  ring->name);
+								  engine->name);
 						else
 							DRM_INFO("Fake missed irq on %s\n",
-								 ring->name);
-						wake_up_all(&ring->irq_queue);
+								 engine->name);
+						wake_up_all(&engine->irq_queue);
 					}
 					/* Safeguard against driver failure */
-					ring->hangcheck.score += BUSY;
+					engine->hangcheck.score += BUSY;
 				} else
 					busy = false;
 			} else {
@@ -3319,48 +3319,48 @@ static void i915_hangcheck_elapsed(unsigned long data)
 				 * being repeatedly kicked and so responsible
 				 * for stalling the machine.
 				 */
-				ring->hangcheck.action = ring_stuck(ring,
-								    acthd);
+				engine->hangcheck.action = engine_stuck(engine,
+									acthd);
 
-				switch (ring->hangcheck.action) {
+				switch (engine->hangcheck.action) {
 				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
 				case HANGCHECK_ACTIVE:
 					break;
 				case HANGCHECK_ACTIVE_LOOP:
-					ring->hangcheck.score += BUSY;
+					engine->hangcheck.score += BUSY;
 					break;
 				case HANGCHECK_KICK:
-					ring->hangcheck.score += KICK;
+					engine->hangcheck.score += KICK;
 					break;
 				case HANGCHECK_HUNG:
-					ring->hangcheck.score += HUNG;
+					engine->hangcheck.score += HUNG;
 					stuck[i] = true;
 					break;
 				}
 			}
 		} else {
-			ring->hangcheck.action = HANGCHECK_ACTIVE;
+			engine->hangcheck.action = HANGCHECK_ACTIVE;
 
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
-			if (ring->hangcheck.score > 0)
-				ring->hangcheck.score--;
+			if (engine->hangcheck.score > 0)
+				engine->hangcheck.score--;
 
-			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+			engine->hangcheck.acthd = engine->hangcheck.max_acthd = 0;
 		}
 
-		ring->hangcheck.seqno = seqno;
-		ring->hangcheck.acthd = acthd;
+		engine->hangcheck.seqno = seqno;
+		engine->hangcheck.acthd = acthd;
 		busy_count += busy;
 	}
 
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+	for_each_engine(engine, dev_priv, i) {
+		if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
 			DRM_INFO("%s on %s\n",
 				 stuck[i] ? "stuck" : "no progress",
-				 ring->name);
+				 engine->name);
 			rings_hung++;
 		}
 	}
@@ -4130,7 +4130,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		new_iir = I915_READ16(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->ring[RCS]);
+			notify_ring(dev, &dev_priv->engine[RCS]);
 
 		for_each_pipe(pipe) {
 			int plane = pipe;
@@ -4320,7 +4320,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->ring[RCS]);
+			notify_ring(dev, &dev_priv->engine[RCS]);
 
 		for_each_pipe(pipe) {
 			int plane = pipe;
@@ -4550,9 +4550,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->ring[RCS]);
+			notify_ring(dev, &dev_priv->engine[RCS]);
 		if (iir & I915_BSD_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->ring[VCS]);
+			notify_ring(dev, &dev_priv->engine[VCS]);
 
 		for_each_pipe(pipe) {
 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0b327eb..f2bc198 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8968,7 +8968,7 @@ out:
  */
 static void intel_mark_fb_busy(struct drm_device *dev,
 			       unsigned frontbuffer_bits,
-			       struct intel_engine_cs *ring)
+			       struct intel_engine_cs *engine)
 {
 	enum pipe pipe;
 
@@ -8980,24 +8980,24 @@ static void intel_mark_fb_busy(struct drm_device *dev,
 			continue;
 
 		intel_increase_pllclock(dev, pipe);
-		if (ring && intel_fbc_enabled(dev))
-			ring->fbc_dirty = true;
+		if (engine && intel_fbc_enabled(dev))
+			engine->fbc_dirty = true;
 	}
 }
 
 /**
  * intel_fb_obj_invalidate - invalidate frontbuffer object
  * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
+ * @engine: set for asynchronous rendering
  *
  * This function gets called every time rendering on the given object starts and
  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * be invalidated. If @engine is non-NULL any subsequent invalidation will be delayed
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring)
+			     struct intel_engine_cs *engine)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9007,7 +9007,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 	if (!obj->frontbuffer_bits)
 		return;
 
-	if (ring) {
+	if (engine) {
 		mutex_lock(&dev_priv->fb_tracking.lock);
 		dev_priv->fb_tracking.busy_bits
 			|= obj->frontbuffer_bits;
@@ -9016,7 +9016,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+	intel_mark_fb_busy(dev, obj->frontbuffer_bits, engine);
 
 	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
 }
@@ -9304,14 +9304,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct intel_engine_cs *engine,
 				 uint32_t flags)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret)
 		return ret;
 
@@ -9322,16 +9322,16 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(engine);
 	return 0;
 }
 
@@ -9339,14 +9339,14 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct intel_engine_cs *engine,
 				 uint32_t flags)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret)
 		return ret;
 
@@ -9354,16 +9354,16 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(engine);
 	return 0;
 }
 
@@ -9371,7 +9371,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct intel_engine_cs *engine,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9379,7 +9379,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(engine, 4);
 	if (ret)
 		return ret;
 
@@ -9387,10 +9387,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
 			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
@@ -9399,10 +9399,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_emit(engine, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(engine);
 	return 0;
 }
 
@@ -9410,7 +9410,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct intel_engine_cs *engine,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9418,14 +9418,14 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(engine, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -9435,10 +9435,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_emit(engine, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(engine);
 	return 0;
 }
 
@@ -9446,7 +9446,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct intel_engine_cs *engine,
 				 uint32_t flags)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -9469,7 +9469,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	}
 
 	len = 4;
-	if (ring->id == RCS) {
+	if (engine->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -9490,11 +9490,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * then do the cacheline alignment, and finally emit the
 	 * MI_DISPLAY_FLIP.
 	 */
-	ret = intel_ring_cacheline_align(ring);
+	ret = intel_ring_cacheline_align(engine);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, len);
+	ret = intel_ring_begin(engine, len);
 	if (ret)
 		return ret;
 
@@ -9507,37 +9507,37 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (ring->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+	if (engine->id == RCS) {
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(engine, DERRMR);
+		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					DERRMR_PIPEB_PRI_FLIP_DONE |
 					DERRMR_PIPEC_PRI_FLIP_DONE));
 		if (IS_GEN8(dev))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8(1) |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+			intel_ring_emit(engine, MI_STORE_REGISTER_MEM(1) |
 					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit(ring, DERRMR);
-		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+		intel_ring_emit(engine, DERRMR);
+		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
 		if (IS_GEN8(dev)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_emit(engine, 0);
+			intel_ring_emit(engine, MI_NOOP);
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
+	__intel_ring_advance(engine);
 	return 0;
 }
 
-static bool use_mmio_flip(struct intel_engine_cs *ring,
+static bool use_mmio_flip(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_object *obj)
 {
 	/*
@@ -9548,10 +9548,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 	 * So using MMIO flips there would disrupt this mechanism.
 	 */
 
-	if (ring == NULL)
+	if (engine == NULL)
 		return true;
 
-	if (INTEL_INFO(ring->dev)->gen < 5)
+	if (INTEL_INFO(engine->dev)->gen < 5)
 		return false;
 
 	if (i915.use_mmio_flip < 0)
@@ -9561,7 +9561,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 	else if (i915.enable_execlists)
 		return true;
 	else
-		return ring != obj->ring;
+		return engine != obj->ring;
 }
 
 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
@@ -9594,7 +9594,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 
 static int intel_postpone_flip(struct drm_i915_gem_object *obj)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret;
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
@@ -9602,46 +9602,46 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj)
 	if (!obj->last_write_seqno)
 		return 0;
 
-	ring = obj->ring;
+	engine = obj->ring;
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
+	if (i915_seqno_passed(engine->get_seqno(engine, true),
 			      obj->last_write_seqno))
 		return 0;
 
-	ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+	ret = i915_gem_check_olr(engine, obj->last_write_seqno);
 	if (ret)
 		return ret;
 
-	if (WARN_ON(!ring->irq_get(ring)))
+	if (WARN_ON(!engine->irq_get(engine)))
 		return 0;
 
 	return 1;
 }
 
-void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+void intel_notify_mmio_flip(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 	struct intel_crtc *intel_crtc;
 	unsigned long irq_flags;
 	u32 seqno;
 
-	seqno = ring->get_seqno(ring, false);
+	seqno = engine->get_seqno(engine, false);
 
 	spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
-	for_each_intel_crtc(ring->dev, intel_crtc) {
+	for_each_intel_crtc(engine->dev, intel_crtc) {
 		struct intel_mmio_flip *mmio_flip;
 
 		mmio_flip = &intel_crtc->mmio_flip;
 		if (mmio_flip->seqno == 0)
 			continue;
 
-		if (ring->id != mmio_flip->ring_id)
+		if (engine->id != mmio_flip->ring_id)
 			continue;
 
 		if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
 			intel_do_mmio_flip(intel_crtc);
 			mmio_flip->seqno = 0;
-			ring->irq_put(ring);
+			engine->irq_put(engine);
 		}
 	}
 	spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
@@ -9651,7 +9651,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct intel_engine_cs *engine,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9687,7 +9687,7 @@ static int intel_default_queue_flip(struct drm_device *dev,
 				    struct drm_crtc *crtc,
 				    struct drm_framebuffer *fb,
 				    struct drm_i915_gem_object *obj,
-				    struct intel_engine_cs *ring,
+				    struct intel_engine_cs *engine,
 				    uint32_t flags)
 {
 	return -ENODEV;
@@ -9705,7 +9705,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct intel_unpin_work *work;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	unsigned long flags;
 	int ret;
 
@@ -9783,32 +9783,32 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
 
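+	/* Pick the engine for the flip: gen7+ reuses the engine that last
+	 * wrote the object (when that was the render engine) so the flip
+	 * queues behind the rendering; otherwise fall back to a fixed one. */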
 	if (IS_VALLEYVIEW(dev)) {
-		ring = &dev_priv->ring[BCS];
+		engine = &dev_priv->engine[BCS];
 		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
 			/* vlv: DISPLAY_FLIP fails to change tiling */
-			ring = NULL;
+			engine = NULL;
 	} else if (IS_IVYBRIDGE(dev)) {
-		ring = &dev_priv->ring[BCS];
+		engine = &dev_priv->engine[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		ring = obj->ring;
-		if (ring == NULL || ring->id != RCS)
-			ring = &dev_priv->ring[BCS];
+		engine = obj->ring;
+		if (engine == NULL || engine->id != RCS)
+			engine = &dev_priv->engine[BCS];
 	} else {
-		ring = &dev_priv->ring[RCS];
+		engine = &dev_priv->engine[RCS];
 	}
 
-	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, engine);
 	if (ret)
 		goto cleanup_pending;
 
 	work->gtt_offset =
 		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
 
-	if (use_mmio_flip(ring, obj))
-		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+	if (use_mmio_flip(engine, obj))
+		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, engine,
 					    page_flip_flags);
 	else
-		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, engine,
 				page_flip_flags);
 	if (ret)
 		goto cleanup_unpin;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d683a20..4a76788 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -781,7 +781,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring);
+			     struct intel_engine_cs *engine);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c096b9b..fbc877b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -271,11 +271,11 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
 	return desc;
 }
 
-static void execlists_elsp_write(struct intel_engine_cs *ring,
+static void execlists_elsp_write(struct intel_engine_cs *engine,
 				 struct drm_i915_gem_object *ctx_obj0,
 				 struct drm_i915_gem_object *ctx_obj1)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	uint64_t temp = 0;
 	uint32_t desc[4];
 	unsigned long flags;
@@ -304,14 +304,14 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
-	I915_WRITE(RING_ELSP(ring), desc[1]);
-	I915_WRITE(RING_ELSP(ring), desc[0]);
-	I915_WRITE(RING_ELSP(ring), desc[3]);
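+	/* Element 1 is written first, upper dword before lower; the final
+	 * lower-dword write of element 0 (desc[2]) triggers the submission. */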
+	I915_WRITE(RING_ELSP(engine), desc[1]);
+	I915_WRITE(RING_ELSP(engine), desc[0]);
+	I915_WRITE(RING_ELSP(engine), desc[3]);
 	/* The context is automatically loaded after the following */
-	I915_WRITE(RING_ELSP(ring), desc[2]);
+	I915_WRITE(RING_ELSP(engine), desc[2]);
 
 	/* ELSP is a wo register, so use another nearby reg for posting instead */
-	POSTING_READ(RING_EXECLIST_STATUS(ring));
+	POSTING_READ(RING_EXECLIST_STATUS(engine));
 
 	/* Release Force Wakeup (see the big comment above). */
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
@@ -335,45 +335,45 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai
 	return 0;
 }
 
-static int execlists_submit_context(struct intel_engine_cs *ring,
+static int execlists_submit_context(struct intel_engine_cs *engine,
 				    struct intel_context *to0, u32 tail0,
 				    struct intel_context *to1, u32 tail1)
 {
 	struct drm_i915_gem_object *ctx_obj0;
 	struct drm_i915_gem_object *ctx_obj1 = NULL;
 
-	ctx_obj0 = to0->engine[ring->id].state;
+	ctx_obj0 = to0->ring[engine->id].state;
 	BUG_ON(!ctx_obj0);
 	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
 
 	execlists_ctx_write_tail(ctx_obj0, tail0);
 
 	if (to1) {
-		ctx_obj1 = to1->engine[ring->id].state;
+		ctx_obj1 = to1->ring[engine->id].state;
 		BUG_ON(!ctx_obj1);
 		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
 
 		execlists_ctx_write_tail(ctx_obj1, tail1);
 	}
 
-	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+	execlists_elsp_write(engine, ctx_obj0, ctx_obj1);
 
 	return 0;
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *ring)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
 {
 	struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
 	struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
-	assert_spin_locked(&ring->execlist_lock);
+	assert_spin_locked(&engine->execlist_lock);
 
-	if (list_empty(&ring->execlist_queue))
+	if (list_empty(&engine->execlist_queue))
 		return;
 
 	/* Try to read in pairs */
-	list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
 				 execlist_link) {
 		if (!req0) {
 			req0 = cursor;
@@ -392,7 +392,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 
 	WARN_ON(req1 && req1->elsp_submitted);
 
-	WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
+	WARN_ON(execlists_submit_context(engine, req0->ctx, req0->tail,
 					 req1 ? req1->ctx : NULL,
 					 req1 ? req1->tail : 0));
 
@@ -401,21 +401,21 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 		req1->elsp_submitted++;
 }
 
-static bool execlists_check_remove_request(struct intel_engine_cs *ring,
+static bool execlists_check_remove_request(struct intel_engine_cs *engine,
 					   u32 request_id)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_ctx_submit_request *head_req;
 
-	assert_spin_locked(&ring->execlist_lock);
+	assert_spin_locked(&engine->execlist_lock);
 
-	head_req = list_first_entry_or_null(&ring->execlist_queue,
+	head_req = list_first_entry_or_null(&engine->execlist_queue,
 					    struct intel_ctx_submit_request,
 					    execlist_link);
 
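+	/* Completion events always refer to the head of the queue; match it
+	 * against the context ID reported in the status event. */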
 	if (head_req != NULL) {
 		struct drm_i915_gem_object *ctx_obj =
-				head_req->ctx->engine[ring->id].state;
+				head_req->ctx->ring[engine->id].state;
 		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
 			WARN(head_req->elsp_submitted == 0,
 			     "Never submitted head request\n");
@@ -438,9 +438,9 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
+void intel_execlists_handle_ctx_events(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
@@ -448,25 +448,25 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
 	u32 status_id;
 	u32 submit_contexts = 0;
 
-	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 
-	read_pointer = ring->next_context_status_buffer;
+	read_pointer = engine->next_context_status_buffer;
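+	/* The context status buffer is a ring of six entries; unwrap the
+	 * write pointer so the loop below can walk new entries linearly. */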
 	write_pointer = status_pointer & 0x07;
 	if (read_pointer > write_pointer)
 		write_pointer += 6;
 
-	spin_lock(&ring->execlist_lock);
+	spin_lock(&engine->execlist_lock);
 
 	while (read_pointer < write_pointer) {
 		read_pointer++;
-		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+		status = I915_READ(RING_CONTEXT_STATUS_BUF(engine) +
 				(read_pointer % 6) * 8);
-		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(engine) +
 				(read_pointer % 6) * 8 + 4);
 
 		if (status & GEN8_CTX_STATUS_PREEMPTED) {
 			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
-				if (execlists_check_remove_request(ring, status_id))
+				if (execlists_check_remove_request(engine, status_id))
 					WARN(1, "Lite Restored request removed from queue\n");
 			} else
 				WARN(1, "Preemption without Lite Restore\n");
@@ -474,28 +474,28 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
 
 		 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
 		     (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
-			if (execlists_check_remove_request(ring, status_id))
+			if (execlists_check_remove_request(engine, status_id))
 				submit_contexts++;
 		}
 	}
 
 	if (submit_contexts != 0)
-		execlists_context_unqueue(ring);
+		execlists_context_unqueue(engine);
 
-	spin_unlock(&ring->execlist_lock);
+	spin_unlock(&engine->execlist_lock);
 
 	WARN(submit_contexts > 2, "More than two context complete events?\n");
-	ring->next_context_status_buffer = write_pointer % 6;
+	engine->next_context_status_buffer = write_pointer % 6;
 
-	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
+	I915_WRITE(RING_CONTEXT_STATUS_PTR(engine),
+		   ((u32)engine->next_context_status_buffer & 0x07) << 8);
 }
 
 static void execlists_free_request_task(struct work_struct *work)
 {
 	struct intel_ctx_submit_request *req =
 		container_of(work, struct intel_ctx_submit_request, work);
-	struct drm_device *dev = req->ring->dev;
+	struct drm_device *dev = req->engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	intel_runtime_pm_put(dev_priv);
@@ -507,12 +507,12 @@ static void execlists_free_request_task(struct work_struct *work)
 	kfree(req);
 }
 
-static int execlists_context_queue(struct intel_engine_cs *ring,
+static int execlists_context_queue(struct intel_engine_cs *engine,
 				   struct intel_context *to,
 				   u32 tail)
 {
 	struct intel_ctx_submit_request *req = NULL, *cursor;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	unsigned long flags;
 	int num_elements = 0;
 
@@ -521,22 +521,22 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		return -ENOMEM;
 	req->ctx = to;
 	i915_gem_context_reference(req->ctx);
-	req->ring = ring;
+	req->engine = engine;
 	req->tail = tail;
 	INIT_WORK(&req->work, execlists_free_request_task);
 
 	intel_runtime_pm_get(dev_priv);
 
-	spin_lock_irqsave(&ring->execlist_lock, flags);
+	spin_lock_irqsave(&engine->execlist_lock, flags);
 
-	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
 		if (++num_elements > 2)
 			break;
 
 	if (num_elements > 2) {
 		struct intel_ctx_submit_request *tail_req;
 
-		tail_req = list_last_entry(&ring->execlist_queue,
+		tail_req = list_last_entry(&engine->execlist_queue,
 					   struct intel_ctx_submit_request,
 					   execlist_link);
 
@@ -548,37 +548,37 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		}
 	}
 
-	list_add_tail(&req->execlist_link, &ring->execlist_queue);
+	list_add_tail(&req->execlist_link, &engine->execlist_queue);
 	if (num_elements == 0)
-		execlists_context_unqueue(ring);
+		execlists_context_unqueue(engine);
 
-	spin_unlock_irqrestore(&ring->execlist_lock, flags);
+	spin_unlock_irqrestore(&engine->execlist_lock, flags);
 
 	return 0;
 }
 
 static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *engine = ringbuf->engine;
 	uint32_t flush_domains;
 	int ret;
 
 	flush_domains = 0;
-	if (ring->gpu_caches_dirty)
+	if (engine->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = engine->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *engine = ringbuf->engine;
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -587,7 +587,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		ret = i915_gem_object_sync(obj, ring);
+		ret = i915_gem_object_sync(obj, engine);
 		if (ret)
 			return ret;
 
@@ -624,7 +624,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
  * Return: non-zero if the submission fails.
  */
 int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
+			       struct intel_engine_cs *engine,
 			       struct intel_context *ctx,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas,
@@ -632,7 +632,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 			       u64 exec_start, u32 flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_ringbuffer *ringbuf = ctx->ring[engine->id].ringbuf;
 	int instp_mode;
 	u32 instp_mask;
 	int ret;
@@ -643,7 +643,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -687,7 +687,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 	if (ret)
 		return ret;
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->engine[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_logical_ring_begin(ringbuf, 4);
 		if (ret)
@@ -702,51 +702,51 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
-	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+	ret = engine->emit_bb_start(ringbuf, exec_start, flags);
 	if (ret)
 		return ret;
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, engine);
+	i915_gem_execbuffer_retire_commands(dev, file, engine, batch_obj);
 
 	return 0;
 }
 
-void intel_logical_ring_stop(struct intel_engine_cs *ring)
+void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	int ret;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+	ret = intel_engine_idle(engine);
+	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  ring->name, ret);
+			  engine->name, ret);
 
 	/* TODO: Is this correct with Execlists enabled? */
-	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+	if (wait_for_atomic((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+		DRM_ERROR("%s: timed out trying to stop engine\n", engine->name);
 		return;
 	}
-	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
 int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *engine = ringbuf->engine;
 	int ret;
 
-	if (!ring->gpu_caches_dirty)
+	if (!engine->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
@@ -761,24 +761,24 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
  */
 void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *engine = ringbuf->engine;
 	struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
 
 	intel_logical_ring_advance(ringbuf);
 
-	if (intel_ring_stopped(ring))
+	if (intel_engine_stopped(engine))
 		return;
 
-	execlists_context_queue(ring, ctx, ringbuf->tail);
+	execlists_context_queue(engine, ctx, ringbuf->tail);
 }
 
-static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
+static int logical_ring_alloc_seqno(struct intel_engine_cs *engine,
 				    struct intel_context *ctx)
 {
-	if (ring->outstanding_lazy_seqno)
+	if (engine->outstanding_lazy_seqno)
 		return 0;
 
-	if (ring->preallocated_lazy_request == NULL) {
+	if (engine->preallocated_lazy_request == NULL) {
 		struct drm_i915_gem_request *request;
 
 		request = kmalloc(sizeof(*request), GFP_KERNEL);
@@ -792,16 +792,16 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 		request->ctx = ctx;
 		i915_gem_context_reference(request->ctx);
 
-		ring->preallocated_lazy_request = request;
+		engine->preallocated_lazy_request = request;
 	}
 
-	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+	return i915_gem_get_seqno(engine->dev, &engine->outstanding_lazy_seqno);
 }
 
 static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 				     int bytes)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *engine = ringbuf->engine;
 	struct drm_i915_gem_request *request;
 	u32 seqno = 0;
 	int ret;
@@ -815,7 +815,7 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 			return 0;
 	}
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, list) {
 		if (__intel_ring_space(request->tail, ringbuf->tail,
 				       ringbuf->size) >= bytes) {
 			seqno = request->seqno;
@@ -826,11 +826,11 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 	if (seqno == 0)
 		return -ENOSPC;
 
-	ret = i915_wait_seqno(ring, seqno);
+	ret = i915_wait_seqno(engine, seqno);
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests__engine(engine);
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
 
@@ -841,8 +841,8 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
 static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 				       int bytes)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = ringbuf->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
 	int ret;
@@ -862,7 +862,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 	end = jiffies + 60 * HZ;
 
 	do {
-		ringbuf->head = I915_READ_HEAD(ring);
+		ringbuf->head = I915_READ_HEAD(engine);
 		ringbuf->space = intel_ring_space(ringbuf);
 		if (ringbuf->space >= bytes) {
 			ret = 0;
@@ -947,8 +947,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
  */
 int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = ringbuf->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
@@ -962,7 +962,7 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
-	ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
+	ret = logical_ring_alloc_seqno(engine, ringbuf->FIXME_lrc_ctx);
 	if (ret)
 		return ret;
 
@@ -970,32 +970,32 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
 	return 0;
 }
 
-static int gen8_init_common_ring(struct intel_engine_cs *ring)
+static int gen8_init_common_engine(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+	I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
-	I915_WRITE(RING_MODE_GEN7(ring),
+	I915_WRITE(RING_MODE_GEN7(engine),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-	POSTING_READ(RING_MODE_GEN7(ring));
-	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+	POSTING_READ(RING_MODE_GEN7(engine));
+	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
 
 	return 0;
 }
 
-static int gen8_init_render_ring(struct intel_engine_cs *ring)
+static int gen8_init_render_engine(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = gen8_init_common_ring(ring);
+	ret = gen8_init_common_engine(engine);
 	if (ret)
 		return ret;
 
@@ -1007,7 +1007,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
 	 */
 	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
-	ret = intel_init_pipe_control(ring);
+	ret = intel_init_pipe_control(engine);
 	if (ret)
 		return ret;
 
@@ -1036,9 +1036,9 @@ static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
@@ -1046,25 +1046,25 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	if (engine->irq_refcount++ == 0) {
+		I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | engine->irq_keep_mask));
+		POSTING_READ(RING_IMR(engine->mmio_base));
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	if (--engine->irq_refcount == 0) {
+		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+		POSTING_READ(RING_IMR(engine->mmio_base));
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1073,8 +1073,8 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = ringbuf->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t cmd;
 	int ret;
@@ -1085,7 +1085,7 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 
 	cmd = MI_FLUSH_DW + 1;
 
-	if (ring == &dev_priv->ring[VCS]) {
+	if (engine == &dev_priv->engine[VCS]) {
 		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
 			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
 				MI_FLUSH_DW_STORE_INDEX |
@@ -1111,8 +1111,8 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_engine_cs *engine = ringbuf->engine;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
 	int ret;
 
@@ -1149,19 +1149,19 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 gen8_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
 {
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 }
 
 static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *engine = ringbuf->engine;
 	u32 cmd;
 	int ret;
 
@@ -1174,10 +1174,10 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
 
 	intel_logical_ring_emit(ringbuf, cmd);
 	intel_logical_ring_emit(ringbuf,
-				(ring->status_page.gfx_addr +
+				(engine->status_page.gfx_addr +
 				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
 	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
+	intel_logical_ring_emit(ringbuf, engine->outstanding_lazy_seqno);
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	intel_logical_ring_advance_and_submit(ringbuf);
@@ -1191,65 +1191,65 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
- * @ring: Engine Command Streamer.
+ * @engine: Engine Command Streamer.
  *
  */
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	intel_logical_ring_stop(ring);
-	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-	ring->preallocated_lazy_request = NULL;
-	ring->outstanding_lazy_seqno = 0;
+	intel_logical_ring_stop(engine);
+	WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+	engine->preallocated_lazy_request = NULL;
+	engine->outstanding_lazy_seqno = 0;
 
-	if (ring->cleanup)
-		ring->cleanup(ring);
+	if (engine->cleanup)
+		engine->cleanup(engine);
 
-	i915_cmd_parser_fini_ring(ring);
+	i915_cmd_parser_fini_engine(engine);
 
-	if (ring->status_page.obj) {
-		kunmap(sg_page(ring->status_page.obj->pages->sgl));
-		ring->status_page.obj = NULL;
+	if (engine->status_page.obj) {
+		kunmap(sg_page(engine->status_page.obj->pages->sgl));
+		engine->status_page.obj = NULL;
 	}
 }
 
-static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 {
 	int ret;
-	struct intel_context *dctx = ring->default_context;
+	struct intel_context *dctx = engine->default_context;
 	struct drm_i915_gem_object *dctx_obj;
 
 	/* Intentionally left blank. */
-	ring->buffer = NULL;
+	engine->buffer = NULL;
 
-	ring->dev = dev;
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
-	init_waitqueue_head(&ring->irq_queue);
+	engine->dev = dev;
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
+	init_waitqueue_head(&engine->irq_queue);
 
-	INIT_LIST_HEAD(&ring->execlist_queue);
-	spin_lock_init(&ring->execlist_lock);
-	ring->next_context_status_buffer = 0;
+	INIT_LIST_HEAD(&engine->execlist_queue);
+	spin_lock_init(&engine->execlist_lock);
+	engine->next_context_status_buffer = 0;
 
-	ret = intel_lr_context_deferred_create(dctx, ring);
+	ret = intel_lr_context_deferred_create(dctx, engine);
 	if (ret)
 		return ret;
 
 	/* The status page is offset 0 from the context object in LRCs. */
-	dctx_obj = dctx->engine[ring->id].state;
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
-	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
-	if (ring->status_page.page_addr == NULL)
+	dctx_obj = dctx->ring[engine->id].state;
+	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
+	engine->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
+	if (engine->status_page.page_addr == NULL)
 		return -ENOMEM;
-	ring->status_page.obj = dctx_obj;
+	engine->status_page.obj = dctx_obj;
 
-	ret = i915_cmd_parser_init_ring(ring);
+	ret = i915_cmd_parser_init_engine(engine);
 	if (ret)
 		return ret;
 
-	if (ring->init) {
-		ret = ring->init(ring);
+	if (engine->init) {
+		ret = engine->init(engine);
 		if (ret)
 			return ret;
 	}
@@ -1257,132 +1257,132 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 	return 0;
 }
 
-static int logical_render_ring_init(struct drm_device *dev)
+static int logical_render_engine_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 
-	ring->name = "render ring";
-	ring->id = RCS;
-	ring->mmio_base = RENDER_RING_BASE;
-	ring->irq_enable_mask =
+	engine->name = "render ring";
+	engine->id = RCS;
+	engine->mmio_base = RENDER_RING_BASE;
+	engine->irq_enable_mask =
 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
-	ring->irq_keep_mask =
+	engine->irq_keep_mask =
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
 	if (HAS_L3_DPF(dev))
-		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
-	ring->init = gen8_init_render_ring;
-	ring->cleanup = intel_fini_pipe_control;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush_render;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
-
-	return logical_ring_init(dev, ring);
+		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+	engine->init = gen8_init_render_engine;
+	engine->cleanup = intel_fini_pipe_control;
+	engine->get_seqno = gen8_get_seqno;
+	engine->set_seqno = gen8_set_seqno;
+	engine->emit_request = gen8_emit_request;
+	engine->emit_flush = gen8_emit_flush_render;
+	engine->irq_get = gen8_logical_ring_get_irq;
+	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->emit_bb_start = gen8_emit_bb_start;
+
+	return logical_ring_init(dev, engine);
 }
 
-static int logical_bsd_ring_init(struct drm_device *dev)
+static int logical_bsd_engine_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS];
 
-	ring->name = "bsd ring";
-	ring->id = VCS;
-	ring->mmio_base = GEN6_BSD_RING_BASE;
-	ring->irq_enable_mask =
+	engine->name = "bsd ring";
+	engine->id = VCS;
+	engine->mmio_base = GEN6_BSD_RING_BASE;
+	engine->irq_enable_mask =
 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-	ring->irq_keep_mask =
+	engine->irq_keep_mask =
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
-	ring->init = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	engine->init = gen8_init_common_engine;
+	engine->get_seqno = gen8_get_seqno;
+	engine->set_seqno = gen8_set_seqno;
+	engine->emit_request = gen8_emit_request;
+	engine->emit_flush = gen8_emit_flush;
+	engine->irq_get = gen8_logical_ring_get_irq;
+	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->emit_bb_start = gen8_emit_bb_start;
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
-static int logical_bsd2_ring_init(struct drm_device *dev)
+static int logical_bsd2_engine_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
 
-	ring->name = "bds2 ring";
-	ring->id = VCS2;
-	ring->mmio_base = GEN8_BSD2_RING_BASE;
-	ring->irq_enable_mask =
+	engine->name = "bds2 ring";
+	engine->id = VCS2;
+	engine->mmio_base = GEN8_BSD2_RING_BASE;
+	engine->irq_enable_mask =
 		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-	ring->irq_keep_mask =
+	engine->irq_keep_mask =
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
-	ring->init = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	engine->init = gen8_init_common_engine;
+	engine->get_seqno = gen8_get_seqno;
+	engine->set_seqno = gen8_set_seqno;
+	engine->emit_request = gen8_emit_request;
+	engine->emit_flush = gen8_emit_flush;
+	engine->irq_get = gen8_logical_ring_get_irq;
+	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->emit_bb_start = gen8_emit_bb_start;
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
-static int logical_blt_ring_init(struct drm_device *dev)
+static int logical_blt_engine_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[BCS];
 
-	ring->name = "blitter ring";
-	ring->id = BCS;
-	ring->mmio_base = BLT_RING_BASE;
-	ring->irq_enable_mask =
+	engine->name = "blitter ring";
+	engine->id = BCS;
+	engine->mmio_base = BLT_RING_BASE;
+	engine->irq_enable_mask =
 		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-	ring->irq_keep_mask =
+	engine->irq_keep_mask =
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
-	ring->init = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	engine->init = gen8_init_common_engine;
+	engine->get_seqno = gen8_get_seqno;
+	engine->set_seqno = gen8_set_seqno;
+	engine->emit_request = gen8_emit_request;
+	engine->emit_flush = gen8_emit_flush;
+	engine->irq_get = gen8_logical_ring_get_irq;
+	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->emit_bb_start = gen8_emit_bb_start;
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
-static int logical_vebox_ring_init(struct drm_device *dev)
+static int logical_vebox_engine_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VECS];
 
-	ring->name = "video enhancement ring";
-	ring->id = VECS;
-	ring->mmio_base = VEBOX_RING_BASE;
-	ring->irq_enable_mask =
+	engine->name = "video enhancement engine";
+	engine->id = VECS;
+	engine->mmio_base = VEBOX_RING_BASE;
+	engine->irq_enable_mask =
 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-	ring->irq_keep_mask =
+	engine->irq_keep_mask =
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
-	ring->init = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	engine->init = gen8_init_common_engine;
+	engine->get_seqno = gen8_get_seqno;
+	engine->set_seqno = gen8_set_seqno;
+	engine->emit_request = gen8_emit_request;
+	engine->emit_flush = gen8_emit_flush;
+	engine->irq_get = gen8_logical_ring_get_irq;
+	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->emit_bb_start = gen8_emit_bb_start;
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 /**
@@ -1400,57 +1400,57 @@ int intel_logical_rings_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = logical_render_ring_init(dev);
+	ret = logical_render_engine_init(dev);
 	if (ret)
 		return ret;
 
 	if (HAS_BSD(dev)) {
-		ret = logical_bsd_ring_init(dev);
+		ret = logical_bsd_engine_init(dev);
 		if (ret)
-			goto cleanup_render_ring;
+			goto cleanup_render_engine;
 	}
 
 	if (HAS_BLT(dev)) {
-		ret = logical_blt_ring_init(dev);
+		ret = logical_blt_engine_init(dev);
 		if (ret)
-			goto cleanup_bsd_ring;
+			goto cleanup_bsd_engine;
 	}
 
 	if (HAS_VEBOX(dev)) {
-		ret = logical_vebox_ring_init(dev);
+		ret = logical_vebox_engine_init(dev);
 		if (ret)
-			goto cleanup_blt_ring;
+			goto cleanup_blt_engine;
 	}
 
 	if (HAS_BSD2(dev)) {
-		ret = logical_bsd2_ring_init(dev);
+		ret = logical_bsd2_engine_init(dev);
 		if (ret)
-			goto cleanup_vebox_ring;
+			goto cleanup_vebox_engine;
 	}
 
 	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
 	if (ret)
-		goto cleanup_bsd2_ring;
+		goto cleanup_bsd2_engine;
 
 	return 0;
 
-cleanup_bsd2_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
-cleanup_vebox_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
-cleanup_blt_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
-cleanup_bsd_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
-cleanup_render_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+cleanup_bsd2_engine:
+	intel_logical_ring_cleanup(&dev_priv->engine[VCS2]);
+cleanup_vebox_engine:
+	intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
+cleanup_blt_engine:
+	intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
+cleanup_bsd_engine:
+	intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
+cleanup_render_engine:
+	intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
 
 	return ret;
 }
 
 static int
 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
-		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+		    struct intel_engine_cs *engine, struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -1482,58 +1482,58 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	 * only for the first context restore: on a subsequent save, the GPU will
 	 * recreate this batchbuffer with new values (including all the missing
 	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
-	if (ring->id == RCS)
+	if (engine->id == RCS)
 		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
 	else
 		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
 	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
-	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
+	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(engine);
 	reg_state[CTX_CONTEXT_CONTROL+1] =
 			_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
-	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
+	reg_state[CTX_RING_HEAD] = RING_HEAD(engine->mmio_base);
 	reg_state[CTX_RING_HEAD+1] = 0;
-	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
+	reg_state[CTX_RING_TAIL] = RING_TAIL(engine->mmio_base);
 	reg_state[CTX_RING_TAIL+1] = 0;
-	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
+	reg_state[CTX_RING_BUFFER_START] = RING_START(engine->mmio_base);
 	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
-	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
+	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(engine->mmio_base);
 	reg_state[CTX_RING_BUFFER_CONTROL+1] =
 			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
-	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
+	reg_state[CTX_BB_HEAD_U] = engine->mmio_base + 0x168;
 	reg_state[CTX_BB_HEAD_U+1] = 0;
-	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
+	reg_state[CTX_BB_HEAD_L] = engine->mmio_base + 0x140;
 	reg_state[CTX_BB_HEAD_L+1] = 0;
-	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
+	reg_state[CTX_BB_STATE] = engine->mmio_base + 0x110;
 	reg_state[CTX_BB_STATE+1] = (1<<5);
-	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
+	reg_state[CTX_SECOND_BB_HEAD_U] = engine->mmio_base + 0x11c;
 	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
-	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
+	reg_state[CTX_SECOND_BB_HEAD_L] = engine->mmio_base + 0x114;
 	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
-	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
+	reg_state[CTX_SECOND_BB_STATE] = engine->mmio_base + 0x118;
 	reg_state[CTX_SECOND_BB_STATE+1] = 0;
-	if (ring->id == RCS) {
+	if (engine->id == RCS) {
 		/* TODO: according to BSpec, the register state context
 		 * for CHV does not have these. OTOH, these registers do
 		 * exist in CHV. I'm waiting for a clarification */
-		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
+		reg_state[CTX_BB_PER_CTX_PTR] = engine->mmio_base + 0x1c0;
 		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
-		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
+		reg_state[CTX_RCS_INDIRECT_CTX] = engine->mmio_base + 0x1c4;
 		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
-		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
+		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = engine->mmio_base + 0x1c8;
 		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
 	}
 	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
 	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
-	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
+	reg_state[CTX_CTX_TIMESTAMP] = engine->mmio_base + 0x3a8;
 	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
-	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
-	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
-	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
-	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
-	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
-	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
-	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
-	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
+	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(engine, 3);
+	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(engine, 3);
+	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(engine, 2);
+	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(engine, 2);
+	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(engine, 1);
+	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(engine, 1);
+	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(engine, 0);
+	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(engine, 0);
 	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
 	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
 	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
@@ -1542,7 +1542,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
 	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
 	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
-	if (ring->id == RCS) {
+	if (engine->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
 		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
@@ -1569,9 +1569,9 @@ void intel_lr_context_free(struct intel_context *ctx)
 {
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct drm_i915_gem_object *ctx_obj = ctx->ring[i].state;
+		struct intel_ringbuffer *ringbuf = ctx->ring[i].ringbuf;
 
 		if (ctx_obj) {
 			intel_destroy_ringbuffer_obj(ringbuf);
@@ -1582,13 +1582,13 @@ void intel_lr_context_free(struct intel_context *ctx)
 	}
 }
 
-static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+static uint32_t get_lr_context_size(struct intel_engine_cs *engine)
 {
 	int ret = 0;
 
-	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+	WARN_ON(INTEL_INFO(engine->dev)->gen != 8);
 
-	switch (ring->id) {
+	switch (engine->id) {
 	case RCS:
 		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
 		break;
@@ -1617,19 +1617,19 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
- * Return: non-zero on eror.
+ * Return: non-zero on error.
  */
 int intel_lr_context_deferred_create(struct intel_context *ctx,
-				     struct intel_engine_cs *ring)
+				     struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
 	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-	if (ctx->engine[ring->id].state)
+	if (ctx->ring[engine->id].state)
 		return 0;
 
-	context_size = round_up(get_lr_context_size(ring), 4096);
+	context_size = round_up(get_lr_context_size(engine), 4096);
 
 	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
 	if (IS_ERR(ctx_obj)) {
@@ -1648,14 +1648,14 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
 	if (!ringbuf) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-				ring->name);
+				engine->name);
 		i915_gem_object_ggtt_unpin(ctx_obj);
 		drm_gem_object_unreference(&ctx_obj->base);
 		ret = -ENOMEM;
 		return ret;
 	}
 
-	ringbuf->ring = ring;
+	ringbuf->engine = engine;
 	ringbuf->FIXME_lrc_ctx = ctx;
 
 	ringbuf->size = 32 * PAGE_SIZE;
@@ -1673,19 +1673,19 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
-				ring->name, ret);
+				engine->name, ret);
 		goto error;
 	}
 
-	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
 		intel_destroy_ringbuffer_obj(ringbuf);
 		goto error;
 	}
 
-	ctx->engine[ring->id].ringbuf = ringbuf;
-	ctx->engine[ring->id].state = ctx_obj;
+	ctx->ring[engine->id].ringbuf = ringbuf;
+	ctx->ring[engine->id].state = ctx_obj;
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 991d449..a6f8004 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -98,7 +98,7 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
  */
 struct intel_ctx_submit_request {
 	struct intel_context *ctx;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 tail;
 
 	struct list_head execlist_link;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index dc2f4f2..39b8e90 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,16 +213,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	ret = i915_add_request(ring, &overlay->last_flip_req);
+	ret = i915_add_request(engine, &overlay->last_flip_req);
 	if (ret)
 		return ret;
 
 	overlay->flip_tail = tail;
-	ret = i915_wait_seqno(ring, overlay->last_flip_req);
+	ret = i915_wait_seqno(engine, overlay->last_flip_req);
 	if (ret)
 		return ret;
 	i915_gem_retire_requests(dev);
@@ -236,7 +236,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	BUG_ON(overlay->active);
@@ -244,15 +244,15 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(engine, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return intel_overlay_do_wait_request(overlay, NULL);
 }
@@ -263,7 +263,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -278,15 +278,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(engine, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(engine, flip_addr);
+	intel_ring_advance(engine);
 
-	return i915_add_request(ring, &overlay->last_flip_req);
+	return i915_add_request(engine, &overlay->last_flip_req);
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -320,7 +320,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -332,27 +332,27 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret)
 		return ret;
 
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(engine, flip_addr);
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
 	} else {
-		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(ring, flip_addr);
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(engine, flip_addr);
+		intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
 }
@@ -363,13 +363,13 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_wait_seqno(ring, overlay->last_flip_req);
+	ret = i915_wait_seqno(engine, overlay->last_flip_req);
 	if (ret)
 		return ret;
 	i915_gem_retire_requests(dev);
@@ -389,7 +389,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	/* Only wait if there is actually an old frame to release to
@@ -400,13 +400,13 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
-		ret = intel_ring_begin(ring, 2);
+		ret = intel_ring_begin(engine, 2);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_advance(engine);
 
 		ret = intel_overlay_do_wait_request(overlay,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c8f744c..10e1133 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3634,7 +3634,7 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t rc6_mask = 0, rp_state_cap;
 	int unused;
 
@@ -3655,8 +3655,8 @@ static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	if (IS_BROADWELL(dev))
 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@@ -3717,7 +3717,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 rp_state_cap;
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
 	u32 gtfifodbg;
@@ -3755,8 +3755,8 @@ static void gen6_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@@ -4167,7 +4167,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
 static void cherryview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
 	int i;
 
@@ -4191,8 +4191,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 
 	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
@@ -4259,7 +4259,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 gtfifodbg, val, rc6_mode = 0;
 	int i;
 
@@ -4296,8 +4296,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
@@ -4393,7 +4393,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 static void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	bool was_interruptible;
 	int ret;
 
@@ -4416,31 +4416,31 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(engine, 6);
 	if (ret) {
 		ironlake_teardown_rc6(dev);
 		dev_priv->mm.interruptible = was_interruptible;
 		return;
 	}
 
-	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
+	intel_ring_emit(engine, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	intel_ring_emit(engine, MI_SET_CONTEXT);
+	intel_ring_emit(engine, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
 			MI_RESTORE_INHIBIT);
-	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_SUSPEND_FLUSH);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_FLUSH);
+	intel_ring_advance(engine);
 
 	/*
 	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
 	 * does an implicit flush, combined with MI_FLUSH above, it should be
 	 * safe to assume that renderctx is valid
 	 */
-	ret = intel_ring_idle(ring);
+	ret = intel_engine_idle(engine);
 	dev_priv->mm.interruptible = was_interruptible;
 	if (ret) {
 		DRM_ERROR("failed to enable ironlake power savings\n");
@@ -4903,7 +4903,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool ret = false;
 	int i;
 
@@ -4912,8 +4912,8 @@ bool i915_gpu_busy(void)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	for_each_ring(ring, dev_priv, i)
-		ret |= !list_empty(&ring->request_list);
+	for_each_engine(engine, dev_priv, i)
+		ret |= !list_empty(&engine->request_list);
 
 out_unlock:
 	spin_unlock_irq(&mchdev_lock);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ee61be6..025b6bb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,20 +34,20 @@
 #include "intel_drv.h"
 
 bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_engine_initialized(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 
 	if (!dev)
 		return false;
 
 	if (i915.enable_execlists) {
-		struct intel_context *dctx = ring->default_context;
-		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+		struct intel_context *dctx = engine->default_context;
+		struct intel_ringbuffer *ringbuf = dctx->ring[engine->id].ringbuf;
 
 		return ringbuf->obj;
 	} else
-		return ring->buffer && ring->buffer->obj;
+		return engine->buffer && engine->buffer->obj;
 }
 
 int __intel_ring_space(int head, int tail, int size)
@@ -64,17 +64,17 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
 				  ringbuf->tail, ringbuf->size);
 }
 
-bool intel_ring_stopped(struct intel_engine_cs *ring)
+bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
-	if (intel_ring_stopped(ring))
+	if (intel_engine_stopped(ring))
 		return;
 	ring->write_tail(ring, ringbuf->tail);
 }
@@ -454,16 +454,16 @@ static void ring_write_tail(struct intel_engine_cs *ring,
 	I915_WRITE_TAIL(ring, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u64 acthd;
 
-	if (INTEL_INFO(ring->dev)->gen >= 8)
-		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
-					 RING_ACTHD_UDW(ring->mmio_base));
-	else if (INTEL_INFO(ring->dev)->gen >= 4)
-		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+	if (INTEL_INFO(engine->dev)->gen >= 8)
+		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+					 RING_ACTHD_UDW(engine->mmio_base));
+	else if (INTEL_INFO(engine->dev)->gen >= 4)
+		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
 	else
 		acthd = I915_READ(ACTHD);
 
@@ -735,7 +735,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
 	if (ret)
 		return ret;
 
-	for_each_ring(waiter, dev_priv, i) {
+	for_each_engine(waiter, dev_priv, i) {
 		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
@@ -773,7 +773,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
 	if (ret)
 		return ret;
 
-	for_each_ring(waiter, dev_priv, i) {
+	for_each_engine(waiter, dev_priv, i) {
 		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
@@ -809,7 +809,7 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 	if (ret)
 		return ret;
 
-	for_each_ring(useless, dev_priv, i) {
+	for_each_engine(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
@@ -1136,7 +1136,7 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1610,7 +1610,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	ringbuf->size = 32 * PAGE_SIZE;
-	ringbuf->ring = ring;
+	ringbuf->engine = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1640,7 +1640,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (IS_I830(dev) || IS_845G(dev))
 		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
-	ret = i915_cmd_parser_init_ring(ring);
+	ret = i915_cmd_parser_init_engine(ring);
 	if (ret)
 		goto error;
 
@@ -1656,15 +1656,15 @@ error:
 	return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+void intel_cleanup_engine(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(ring))
 		return;
 
-	intel_stop_ring_buffer(ring);
+	intel_stop_engine(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
 	intel_destroy_ringbuffer_obj(ringbuf);
@@ -1676,7 +1676,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	cleanup_status_page(ring);
 
-	i915_cmd_parser_fini_ring(ring);
+	i915_cmd_parser_fini_engine(ring);
 
 	kfree(ringbuf);
 	ring->buffer = NULL;
@@ -1713,7 +1713,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests__engine(ring);
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
 
@@ -1803,7 +1803,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 	return 0;
 }
 
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_engine_idle(struct intel_engine_cs *ring)
 {
 	u32 seqno;
 	int ret;
@@ -1912,7 +1912,7 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 	return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+void intel_engine_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2104,10 +2104,10 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
 	return 0;
 }
 
-int intel_init_render_ring_buffer(struct drm_device *dev)
+int intel_init_render_engine(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->engine[RCS];
 	struct drm_i915_gem_object *obj;
 	int ret;
 
@@ -2241,10 +2241,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
-int intel_init_bsd_ring_buffer(struct drm_device *dev)
+int intel_init_bsd_engine(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+	struct intel_engine_cs *ring = &dev_priv->engine[VCS];
 
 	ring->name = "bsd ring";
 	ring->id = VCS;
@@ -2318,10 +2318,10 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
  * Initialize the second BSD ring for Broadwell GT3.
  * It is noted that this only exists on Broadwell GT3.
  */
-int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+int intel_init_bsd2_engine(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+	struct intel_engine_cs *ring = &dev_priv->engine[VCS2];
 
 	if ((INTEL_INFO(dev)->gen != 8)) {
 		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
@@ -2353,10 +2353,10 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
-int intel_init_blt_ring_buffer(struct drm_device *dev)
+int intel_init_blt_engine(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+	struct intel_engine_cs *ring = &dev_priv->engine[BCS];
 
 	ring->name = "blitter ring";
 	ring->id = BCS;
@@ -2410,10 +2410,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
-int intel_init_vebox_ring_buffer(struct drm_device *dev)
+int intel_init_vebox_engine(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+	struct intel_engine_cs *ring = &dev_priv->engine[VECS];
 
 	ring->name = "video enhancement ring";
 	ring->id = VECS;
@@ -2462,7 +2462,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 }
 
 int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_engine_flush_all_caches(struct intel_engine_cs *ring)
 {
 	int ret;
 
@@ -2480,7 +2480,7 @@ intel_ring_flush_all_caches(struct intel_engine_cs *ring)
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+intel_engine_invalidate_all_caches(struct intel_engine_cs *ring)
 {
 	uint32_t flush_domains;
 	int ret;
@@ -2500,14 +2500,14 @@ intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
 }
 
 void
-intel_stop_ring_buffer(struct intel_engine_cs *ring)
+intel_stop_engine(struct intel_engine_cs *ring)
 {
 	int ret;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(ring))
 		return;
 
-	ret = intel_ring_idle(ring);
+	ret = intel_engine_idle(ring);
 	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 73625af..8648c42 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -29,37 +29,37 @@ struct  intel_hw_status_page {
 	struct		drm_i915_gem_object *obj;
 };
 
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
+#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
 
-#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
+#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
 
-#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
+#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
 
-#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
+#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
 
-#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
+#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
 
-#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
-#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
+#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
 
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
  */
 #define i915_semaphore_seqno_size sizeof(uint64_t)
-#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
+#define GEN8_SIGNAL_OFFSET(__engine, to)			     \
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) +	\
+	((__engine)->id * I915_NUM_ENGINES * i915_semaphore_seqno_size) +	\
 	(i915_semaphore_seqno_size * (to)))
 
-#define GEN8_WAIT_OFFSET(__ring, from)			     \
+#define GEN8_WAIT_OFFSET(__engine, from)			     \
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
-	(i915_semaphore_seqno_size * (__ring)->id))
+	((from) * I915_NUM_ENGINES * i915_semaphore_seqno_size) + \
+	(i915_semaphore_seqno_size * (__engine)->id))
 
 #define GEN8_RING_SEMAPHORE_INIT do { \
 	if (!dev_priv->semaphore_obj) { \
@@ -73,7 +73,7 @@ struct  intel_hw_status_page {
 	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
 	} while(0)
 
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
@@ -84,12 +84,12 @@ enum intel_ring_hangcheck_action {
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
 	u64 acthd;
 	u64 max_acthd;
 	u32 seqno;
 	int score;
-	enum intel_ring_hangcheck_action action;
+	enum intel_engine_hangcheck_action action;
 	int deadlock;
 };
 
@@ -97,7 +97,7 @@ struct intel_ringbuffer {
 	struct drm_i915_gem_object *obj;
 	void __iomem *virtual_start;
 
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 
 	/*
 	 * FIXME: This backpointer is an artifact of the history of how the
@@ -125,14 +125,14 @@ struct intel_ringbuffer {
 
 struct  intel_engine_cs {
 	const char	*name;
-	enum intel_ring_id {
+	enum intel_engine_id {
 		RCS = 0x0,
 		VCS,
 		BCS,
 		VECS,
 		VCS2
 	} id;
-#define I915_NUM_RINGS 5
+#define I915_NUM_ENGINES 5
 #define LAST_USER_RING (VECS + 1)
 	u32		mmio_base;
 	struct		drm_device *dev;
@@ -143,33 +143,33 @@ struct  intel_engine_cs {
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;
-	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
-	void		(*irq_put)(struct intel_engine_cs *ring);
+	bool __must_check (*irq_get)(struct intel_engine_cs *engine);
+	void		(*irq_put)(struct intel_engine_cs *engine);
 
-	int		(*init)(struct intel_engine_cs *ring);
+	int		(*init)(struct intel_engine_cs *engine);
 
-	void		(*write_tail)(struct intel_engine_cs *ring,
+	void		(*write_tail)(struct intel_engine_cs *engine,
 				      u32 value);
-	int __must_check (*flush)(struct intel_engine_cs *ring,
+	int __must_check (*flush)(struct intel_engine_cs *engine,
 				  u32	invalidate_domains,
 				  u32	flush_domains);
-	int		(*add_request)(struct intel_engine_cs *ring);
+	int		(*add_request)(struct intel_engine_cs *engine);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
 	 * seen value is good enough. Note that the seqno will always be
 	 * monotonic, even if not coherent.
 	 */
-	u32		(*get_seqno)(struct intel_engine_cs *ring,
+	u32		(*get_seqno)(struct intel_engine_cs *engine,
 				     bool lazy_coherency);
-	void		(*set_seqno)(struct intel_engine_cs *ring,
+	void		(*set_seqno)(struct intel_engine_cs *engine,
 				     u32 seqno);
-	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
+	int		(*dispatch_execbuffer)(struct intel_engine_cs *engine,
 					       u64 offset, u32 length,
 					       unsigned flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
-	void		(*cleanup)(struct intel_engine_cs *ring);
+	void		(*cleanup)(struct intel_engine_cs *engine);
 
 	/* GEN8 signal/wait table - never trust comments!
 	 *	  signal to	signal to    signal to   signal to      signal to
@@ -209,20 +209,20 @@ struct  intel_engine_cs {
 	 *  ie. transpose of f(x, y)
 	 */
 	struct {
-		u32	sync_seqno[I915_NUM_RINGS-1];
+		u32	sync_seqno[I915_NUM_ENGINES-1];
 
 		union {
 			struct {
 				/* our mbox written by others */
-				u32		wait[I915_NUM_RINGS];
+				u32		wait[I915_NUM_ENGINES];
 				/* mboxes this ring signals to */
-				u32		signal[I915_NUM_RINGS];
+				u32		signal[I915_NUM_ENGINES];
 			} mbox;
-			u64		signal_ggtt[I915_NUM_RINGS];
+			u64		signal_ggtt[I915_NUM_ENGINES];
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct intel_engine_cs *ring,
+		int	(*sync_to)(struct intel_engine_cs *engine,
 				   struct intel_engine_cs *to,
 				   u32 seqno);
 		int	(*signal)(struct intel_engine_cs *signaller,
@@ -235,7 +235,7 @@ struct  intel_engine_cs {
 	struct list_head execlist_queue;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
 	int		(*emit_request)(struct intel_ringbuffer *ringbuf);
 	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,
 				      u32 invalidate_domains,
 				      u32 flush_domains);
@@ -273,7 +273,7 @@ struct  intel_engine_cs {
 	struct intel_context *default_context;
 	struct intel_context *last_context;
 
-	struct intel_ring_hangcheck hangcheck;
+	struct intel_engine_hangcheck hangcheck;
 
 	struct {
 		struct drm_i915_gem_object *obj;
@@ -315,16 +315,16 @@ struct  intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
-bool intel_ring_initialized(struct intel_engine_cs *ring);
+bool intel_engine_initialized(struct intel_engine_cs *engine);
 
 static inline unsigned
-intel_ring_flag(struct intel_engine_cs *ring)
+intel_engine_flag(struct intel_engine_cs *engine)
 {
-	return 1 << ring->id;
+	return 1 << engine->id;
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *ring,
+intel_engine_sync_index(struct intel_engine_cs *engine,
-		      struct intel_engine_cs *other)
+			struct intel_engine_cs *other)
 {
 	int idx;
@@ -337,27 +337,27 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
 	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
 	 */
 
-	idx = (other - ring) - 1;
+	idx = (other - engine) - 1;
 	if (idx < 0)
-		idx += I915_NUM_RINGS;
+		idx += I915_NUM_ENGINES;
 
 	return idx;
 }
 
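Plugging numbers into the formula (illustrative only; the engines sit
contiguously in dev_priv->ring[], so the pointer difference equals the id
difference) reproduces the table in the comment for vcs2 (id 4):

	other = rcs  (id 0): idx = (0 - 4) - 1 = -5, -5 + I915_NUM_ENGINES = 0
	other = vecs (id 3): idx = (3 - 4) - 1 = -2, -2 + I915_NUM_ENGINES = 3
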
 static inline u32
-intel_read_status_page(struct intel_engine_cs *ring,
+intel_read_status_page(struct intel_engine_cs *engine,
 		       int reg)
 {
 	/* Ensure that the compiler doesn't optimize away the load. */
 	barrier();
-	return ring->status_page.page_addr[reg];
+	return engine->status_page.page_addr[reg];
 }
 
 static inline void
-intel_write_status_page(struct intel_engine_cs *ring,
+intel_write_status_page(struct intel_engine_cs *engine,
 			int reg, u32 value)
 {
-	ring->status_page.page_addr[reg] = value;
+	engine->status_page.page_addr[reg] = value;
 }
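
A minimal sketch of a get_seqno() backend built on these accessors (the
function name here is made up for illustration; real implementations may
also kick the hardware first when lazy_coherency is false):

	static u32 engine_get_seqno(struct intel_engine_cs *engine,
				    bool lazy_coherency)
	{
		/* I915_GEM_HWS_INDEX is the status page slot that the
		 * engine's seqno writes land in.
		 */
		return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
	}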
 
 /**
@@ -383,60 +383,59 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 			       struct intel_ringbuffer *ringbuf);
 
-void intel_stop_ring_buffer(struct intel_engine_cs *ring);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+void intel_stop_engine(struct intel_engine_cs *engine);
+void intel_cleanup_engine(struct intel_engine_cs *engine);
 
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
+int __must_check intel_ring_begin(struct intel_engine_cs *engine, int n);
+int __must_check intel_ring_cacheline_align(struct intel_engine_cs *engine);
+static inline void intel_ring_emit(struct intel_engine_cs *engine,
 				   u32 data)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
 }
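
The usual emit pattern with these helpers, as a hypothetical caller with
error handling trimmed to the essentials:

	static int emit_two_noops(struct intel_engine_cs *engine)
	{
		int ret;

		ret = intel_ring_begin(engine, 2);	/* reserve 2 dwords */
		if (ret)
			return ret;

		intel_ring_emit(engine, MI_NOOP);
		intel_ring_emit(engine, MI_NOOP);
		intel_ring_advance(engine);	/* wrap the tail, no mmio yet */

		return 0;
	}

__intel_ring_advance() is the variant that goes further and writes the new
tail to the hardware.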
 int __intel_ring_space(int head, int tail, int size);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
+bool intel_engine_stopped(struct intel_engine_cs *engine);
+void __intel_ring_advance(struct intel_engine_cs *engine);
 
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int __must_check intel_engine_idle(struct intel_engine_cs *engine);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+int intel_engine_flush_all_caches(struct intel_engine_cs *engine);
+int intel_engine_invalidate_all_caches(struct intel_engine_cs *engine);
 
-void intel_fini_pipe_control(struct intel_engine_cs *ring);
-int intel_init_pipe_control(struct intel_engine_cs *ring);
+void intel_fini_pipe_control(struct intel_engine_cs *engine);
+int intel_init_pipe_control(struct intel_engine_cs *engine);
 
-int intel_init_render_ring_buffer(struct drm_device *dev);
-int intel_init_bsd_ring_buffer(struct drm_device *dev);
-int intel_init_bsd2_ring_buffer(struct drm_device *dev);
-int intel_init_blt_ring_buffer(struct drm_device *dev);
-int intel_init_vebox_ring_buffer(struct drm_device *dev);
+int intel_init_render_engine(struct drm_device *dev);
+int intel_init_bsd_engine(struct drm_device *dev);
+int intel_init_bsd2_engine(struct drm_device *dev);
+int intel_init_blt_engine(struct drm_device *dev);
+int intel_init_vebox_engine(struct drm_device *dev);
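
These entry points are consumed from the probe path (cf. i915_gem_init_rings()
before this patch); a sketch with the renamed names, unwinding omitted:

	ret = intel_init_render_engine(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_engine(dev);
		if (ret)
			return ret;	/* real code unwinds render first */
	}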
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
-void intel_ring_setup_status_page(struct intel_engine_cs *ring);
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
 
 static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {
 	return ringbuf->tail;
 }
 
-static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
-	BUG_ON(ring->outstanding_lazy_seqno == 0);
-	return ring->outstanding_lazy_seqno;
+	BUG_ON(engine->outstanding_lazy_seqno == 0);
+	return engine->outstanding_lazy_seqno;
 }
 
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine, u32 seqno)
 {
-	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
-		ring->trace_irq_seqno = seqno;
+	if (engine->trace_irq_seqno == 0 && engine->irq_get(engine))
+		engine->trace_irq_seqno = seqno;
 }
 
 #endif /* _INTEL_RINGBUFFER_H_ */
-- 
1.9.1