[Intel-gfx] [PATCH 09/12] drm/i915: Rename intel_context[engine].ringbuf

Chris Wilson <chris@chris-wilson.co.uk>
Fri Nov 20 04:43:49 PST 2015


Perform s/ringbuf/ring/ on the per-engine state inside the context
struct for consistency with the ring/engine split. Whilst here, give
the previously anonymous per-engine struct a name,
struct intel_context_engine.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
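[Editor's note, not part of the patch: a minimal standalone C sketch of how
the renamed field reads at a call site. It uses simplified stand-in types and
an illustrative I915_NUM_RINGS value, not the real i915 definitions.]

/*
 * Illustrative sketch only: simplified stand-in types, not the real
 * i915 structures.  Shows ctx->engine[id].ring (formerly .ringbuf).
 */
#include <stdio.h>

#define I915_NUM_RINGS 5	/* illustrative value */

struct intel_ringbuffer {
	int head, tail, size;
};

struct intel_context {
	/* The previously anonymous per-engine struct now carries a name. */
	struct intel_context_engine {
		void *state;
		struct intel_ringbuffer *ring;	/* was ->ringbuf */
		int pin_count;
	} engine[I915_NUM_RINGS];
};

/* After the rename a call site reads "ring", matching the ring/engine split. */
static struct intel_ringbuffer *
context_ring(struct intel_context *ctx, int engine_id)
{
	return ctx->engine[engine_id].ring;
}

int main(void)
{
	struct intel_ringbuffer rb = { .head = 0, .tail = 0, .size = 32 * 4096 };
	struct intel_context ctx = { .engine[0].ring = &rb };

	printf("ring size: %d\n", context_ring(&ctx, 0)->size);
	return 0;
}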
 drivers/gpu/drm/i915/i915_debugfs.c     |  2 +-
 drivers/gpu/drm/i915/i915_drv.h         |  4 +-
 drivers/gpu/drm/i915/i915_gpu_error.c   |  4 +-
 drivers/gpu/drm/i915/intel_lrc.c        | 85 ++++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++---
 5 files changed, 52 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 56375c36b381..630717fec688 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1950,7 +1950,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 				struct drm_i915_gem_object *ctx_obj =
 					ctx->engine[i].state;
 				struct intel_ringbuffer *ringbuf =
-					ctx->engine[i].ringbuf;
+					ctx->engine[i].ring;
 
 				seq_printf(m, "%s: ", ring->name);
 				if (ctx_obj)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b7eaa2deb437..d8bd58cbb727 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -882,9 +882,9 @@ struct intel_context {
 
 	/* Execlists */
 	bool rcs_initialized;
-	struct {
+	struct intel_context_engine {
 		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ringbuf;
+		struct intel_ringbuffer *ring;
 		int pin_count;
 	} engine[I915_NUM_RINGS];
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 974e3481e449..8b37f72bd91f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1040,9 +1040,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			 * executed).
 			 */
 			if (request)
-				rbuf = request->ctx->engine[ring->id].ringbuf;
+				rbuf = request->ctx->engine[ring->id].ring;
 			else
-				rbuf = ring->default_context->engine[ring->id].ringbuf;
+				rbuf = ring->default_context->engine[ring->id].ring;
 		} else
 			rbuf = ring->buffer;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 346f5889738e..222ae8383f48 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -381,24 +381,24 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
 	execlists_elsp_write(rq0, rq1);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *ring)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
 	struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
 
-	assert_spin_locked(&ring->execlist_lock);
+	assert_spin_locked(&engine->execlist_lock);
 
 	/*
 	 * If irqs are not active generate a warning as batches that finish
 	 * without the irqs may get lost and a GPU Hang may occur.
 	 */
-	WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
+	WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
 
-	if (list_empty(&ring->execlist_queue))
+	if (list_empty(&engine->execlist_queue))
 		return;
 
 	/* Try to read in pairs */
-	list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
 				 execlist_link) {
 		if (!req0) {
 			req0 = cursor;
@@ -408,7 +408,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 			cursor->elsp_submitted = req0->elsp_submitted;
 			list_del(&req0->execlist_link);
 			list_add_tail(&req0->execlist_link,
-				&ring->execlist_retired_req_list);
+				&engine->execlist_retired_req_list);
 			req0 = cursor;
 		} else {
 			req1 = cursor;
@@ -416,7 +416,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 		}
 	}
 
-	if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+	if (IS_GEN8(engine->dev) || IS_GEN9(engine->dev)) {
 		/*
 		 * WaIdleLiteRestore: make sure we never cause a lite
 		 * restore with HEAD==TAIL
@@ -428,11 +428,11 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 			 * for where we prepare the padding after the end of the
 			 * request.
 			 */
-			struct intel_ringbuffer *ringbuf;
+			struct intel_ringbuffer *ring;
 
-			ringbuf = req0->ctx->engine[ring->id].ringbuf;
+			ring = req0->ctx->engine[engine->id].ring;
 			req0->tail += 8;
-			req0->tail &= ringbuf->size - 1;
+			req0->tail &= ring->size - 1;
 		}
 	}
 
@@ -633,7 +633,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 {
 	int ret;
 
-	request->ring = request->ctx->engine[request->engine->id].ringbuf;
+	request->ring = request->ctx->engine[request->engine->id].ring;
 
 	if (request->ctx != request->engine->default_context) {
 		ret = intel_lr_context_pin(request);
@@ -2087,17 +2087,16 @@ void intel_lr_context_free(struct intel_context *ctx)
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
 		if (ctx_obj) {
-			struct intel_ringbuffer *ringbuf =
-					ctx->engine[i].ringbuf;
-			struct intel_engine_cs *engine = ringbuf->engine;
+			struct intel_ringbuffer *ring = ctx->engine[i].ring;
+			struct intel_engine_cs *engine = ring->engine;
 
 			if (ctx == engine->default_context) {
-				intel_unpin_ringbuffer_obj(ringbuf);
+				intel_unpin_ringbuffer_obj(ring);
 				i915_gem_object_ggtt_unpin(ctx_obj);
 			}
 			WARN_ON(ctx->engine[engine->id].pin_count);
-			intel_destroy_ringbuffer_obj(ringbuf);
-			kfree(ringbuf);
+			intel_destroy_ringbuffer_obj(ring);
+			kfree(ring);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
@@ -2164,7 +2163,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	struct drm_device *dev = engine->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	int ret;
 
 	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
@@ -2188,25 +2187,25 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 		}
 	}
 
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf) {
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
 				engine->name);
 		ret = -ENOMEM;
 		goto error_unpin_ctx;
 	}
 
-	ringbuf->engine = engine;
+	ring->engine = engine;
 
-	ringbuf->size = 32 * PAGE_SIZE;
-	ringbuf->effective_size = ringbuf->size;
-	ringbuf->head = 0;
-	ringbuf->tail = 0;
-	ringbuf->last_retired_head = -1;
-	intel_ring_update_space(ringbuf);
+	ring->size = 32 * PAGE_SIZE;
+	ring->effective_size = ring->size;
+	ring->head = 0;
+	ring->tail = 0;
+	ring->last_retired_head = -1;
+	intel_ring_update_space(ring);
 
-	if (ringbuf->obj == NULL) {
-		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+	if (ring->obj == NULL) {
+		ret = intel_alloc_ringbuffer_obj(dev, ring);
 		if (ret) {
 			DRM_DEBUG_DRIVER(
 				"Failed to allocate ringbuffer obj %s: %d\n",
@@ -2215,7 +2214,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 		}
 
 		if (is_global_default_ctx) {
-			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+			ret = intel_pin_and_map_ringbuffer_obj(dev, ring);
 			if (ret) {
 				DRM_ERROR(
 					"Failed to pin and map ringbuffer %s: %d\n",
@@ -2226,13 +2225,13 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 
 	}
 
-	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
+	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
 		goto error;
 	}
 
-	ctx->engine[engine->id].ringbuf = ringbuf;
+	ctx->engine[engine->id].ring = ring;
 	ctx->engine[engine->id].state = ctx_obj;
 
 	if (ctx == engine->default_context)
@@ -2249,7 +2248,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 			if (ret) {
 				DRM_ERROR("ring init context: %d\n", ret);
 				i915_gem_request_cancel(req);
-				ctx->engine[engine->id].ringbuf = NULL;
+				ctx->engine[engine->id].ring = NULL;
 				ctx->engine[engine->id].state = NULL;
 				goto error;
 			}
@@ -2264,11 +2263,11 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 
 error:
 	if (is_global_default_ctx)
-		intel_unpin_ringbuffer_obj(ringbuf);
+		intel_unpin_ringbuffer_obj(ring);
 error_destroy_rbuf:
-	intel_destroy_ringbuffer_obj(ringbuf);
+	intel_destroy_ringbuffer_obj(ring);
 error_free_rbuf:
-	kfree(ringbuf);
+	kfree(ring);
 error_unpin_ctx:
 	if (is_global_default_ctx)
 		i915_gem_object_ggtt_unpin(ctx_obj);
@@ -2280,14 +2279,12 @@ void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *unused;
 	int i;
 
-	for_each_ring(ring, dev_priv, i) {
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
-		struct intel_ringbuffer *ringbuf =
-				ctx->engine[ring->id].ringbuf;
+	for_each_ring(unused, dev_priv, i) {
+		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+		struct intel_ringbuffer *ring = ctx->engine[i].ring;
 		uint32_t *reg_state;
 		struct page *page;
 
@@ -2306,7 +2303,7 @@ void intel_lr_context_reset(struct drm_device *dev,
 
 		kunmap_atomic(reg_state);
 
-		ringbuf->head = 0;
-		ringbuf->tail = 0;
+		ring->head = 0;
+		ring->tail = 0;
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d43a24b6268..f6b7e209cc3c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,20 +34,20 @@
 #include "intel_drv.h"
 
 bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_ring_initialized(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 
 	if (!dev)
 		return false;
 
 	if (i915.enable_execlists) {
-		struct intel_context *dctx = ring->default_context;
-		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+		struct intel_context *dctx = engine->default_context;
+		struct intel_ringbuffer *ring = dctx->engine[engine->id].ring;
 
-		return ringbuf->obj;
+		return ring->obj;
 	} else
-		return ring->buffer && ring->buffer->obj;
+		return engine->buffer && engine->buffer->obj;
 }
 
 int __intel_ring_space(int head, int tail, int size)
-- 
2.6.2