[Intel-gfx] [PATCH 188/190] drm/i915: Use VMA for ringbuffer tracking

Chris Wilson chris@chris-wilson.co.uk
Mon Jan 11 03:01:29 PST 2016


Use the GGTT VMA as the primary cookie for handling ring objects, as
the most common actions upon the ring are mapping and unmapping, which
act upon the VMA itself. By restructuring the code to work with the
ring VMA, we can shrink the code and remove a few cycles from context
pinning.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |   2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c | 135 ++++++++++++++------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |   2 +-
 3 files changed, 61 insertions(+), 78 deletions(-)
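
As a reviewer's aside (not for the commit message): the control flow that
intel_ring_map()/intel_ring_unmap() end up with after this patch boils down
to the standalone sketch below. The stub types (ring_stub, vma_stub), the
helper names and the main() harness are illustrative stand-ins, not i915
code; the comments point at the real calls visible in the diff that follows.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the real i915 structures; illustrative only. */
    struct vma_stub {
    	void *cpu_ptr;		/* what i915_gem_object_pin_vmap() would return */
    	void *io_ptr;		/* what i915_vma_iomap() would return */
    	int pin_count;
    };

    struct ring_stub {
    	struct vma_stub *vma;	/* the GGTT VMA is now the only cookie kept */
    	void *virtual_start;	/* CPU-visible mapping while the ring is pinned */
    	bool vmap;		/* chosen once at creation: HAS_LLC && !stolen */
    };

    /* Mirrors the new intel_ring_map(): pin the VMA, then pick the mapping. */
    static int ring_map(struct ring_stub *ring)
    {
    	void *ptr;

    	ring->vma->pin_count++;	/* i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | ...) */

    	ptr = ring->vmap ? ring->vma->cpu_ptr : ring->vma->io_ptr;
    	if (!ptr) {
    		ring->vma->pin_count--;	/* i915_vma_unpin() on failure */
    		return -ENOMEM;
    	}

    	ring->virtual_start = ptr;
    	return 0;
    }

    /* Mirrors the new intel_ring_unmap(): drop the mapping, then unpin. */
    static void ring_unmap(struct ring_stub *ring)
    {
    	ring->virtual_start = NULL;
    	ring->vma->pin_count--;	/* i915_vma_unpin() */
    }

    int main(void)
    {
    	char backing[4096];
    	struct vma_stub vma = { .cpu_ptr = backing, .io_ptr = backing };
    	struct ring_stub ring = { .vma = &vma, .vmap = true };

    	if (ring_map(&ring))
    		return EXIT_FAILURE;
    	printf("mapped at %p, pin_count=%d\n", ring.virtual_start, vma.pin_count);
    	ring_unmap(&ring);
    	printf("unmapped, pin_count=%d\n", vma.pin_count);
    	return EXIT_SUCCESS;
    }

The point of the restructuring is that the vmap-vs-iomap decision is made
once when the ring is created and cached in ring->vmap, so map/unmap no
longer re-derive it from the object, and any mapping failure unwinds by
simply unpinning the VMA.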

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7fb4088b3966..af2ec70dd7ab 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -379,7 +379,7 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 		if (ctx->engine[n].state)
 			per_file_stats(0, ctx->engine[n].state->obj, data);
 		if (ctx->engine[n].ring)
-			per_file_stats(0, ctx->engine[n].ring->obj, data);
+			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 41c52cdcbe4a..512841df2527 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1899,108 +1899,91 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 
 int intel_ring_map(struct intel_ring *ring)
 {
-	struct drm_i915_gem_object *obj = ring->obj;
-	struct i915_vma *vma;
+	void *ptr;
 	int ret;
 
-	if (HAS_LLC(ring->engine->i915) && !obj->stolen) {
-		vma = i915_gem_object_ggtt_pin(obj, NULL,
-					       0, PAGE_SIZE,
-					       PIN_HIGH);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+	GEM_BUG_ON(ring->virtual_start);
 
-		ret = i915_gem_object_set_to_cpu_domain(obj, true);
-		if (ret)
-			goto unpin;
-
-		ring->virtual_start = i915_gem_object_pin_vmap(obj);
-		if (IS_ERR(ring->virtual_start)) {
-			ret = PTR_ERR(ring->virtual_start);
-			ring->virtual_start = NULL;
-			goto unpin;
-		}
-	} else {
-		vma = i915_gem_object_ggtt_pin(obj, NULL,
-					       0, PAGE_SIZE,
-					       PIN_MAPPABLE);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+	ret = i915_vma_pin(ring->vma, 0, PAGE_SIZE,
+			   PIN_GLOBAL | (ring->vmap ? PIN_HIGH : PIN_MAPPABLE));
+	if (unlikely(ret))
+		return ret;
 
-		ret = i915_gem_object_set_to_gtt_domain(obj, true);
-		if (ret)
-			goto unpin;
-
-		ring->virtual_start = ioremap_wc(ring->engine->i915->gtt.mappable_base +
-						 vma->node.start,
-						 ring->size);
-		if (ring->virtual_start == NULL) {
-			ret = -ENOMEM;
-			goto unpin;
-		}
+	if (ring->vmap)
+		ptr = i915_gem_object_pin_vmap(ring->vma->obj);
+	else
+		ptr = i915_vma_iomap(ring->engine->i915, ring->vma);
+	if (IS_ERR(ptr)) {
+		i915_vma_unpin(ring->vma);
+		return PTR_ERR(ptr);
 	}
 
-	ring->vma = vma;
+	ring->virtual_start = ptr;
 	return 0;
-
-unpin:
-	i915_vma_unpin(vma);
-	return ret;
 }
 
 void intel_ring_unmap(struct intel_ring *ring)
 {
-	if (HAS_LLC(ring->engine->i915) && !ring->obj->stolen)
-		i915_gem_object_unpin_vmap(ring->obj);
-	else
-		iounmap(ring->virtual_start);
+	GEM_BUG_ON(ring->virtual_start == NULL);
 
-	i915_vma_unpin(ring->vma);
-	ring->vma = NULL;
-}
+	if (ring->vmap)
+		i915_gem_object_unpin_vmap(ring->vma->obj);
+	ring->virtual_start = NULL;
 
-static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
-{
-	__i915_gem_object_release_unless_active(ringbuf->obj);
-	ringbuf->obj = NULL;
+	i915_vma_unpin(ring->vma);
 }
 
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ring *ringbuf)
+static struct i915_vma *
+intel_ring_create_vma(struct drm_device *dev, int size)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
+		obj = i915_gem_object_create_stolen(dev, size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ringbuf->size);
+		obj = i915_gem_alloc_object(dev, size);
 	if (obj == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	ringbuf->obj = obj;
+	if (HAS_LLC(dev) && !obj->stolen)
+		ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	else
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret) {
+		vma = ERR_PTR(ret);
+		goto err;
+	}
+
+	vma = i915_gem_obj_lookup_or_create_vma(obj,
+						&to_i915(dev)->gtt.base,
+						NULL);
+	if (IS_ERR(vma))
+		goto err;
+
+	return vma;
 
-	return 0;
+err:
+	drm_gem_object_unreference(&obj->base);
+	return vma;
 }
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
 	struct intel_ring *ring;
-	int ret;
+	struct i915_vma *vma;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (ring == NULL) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-				 engine->name);
+	if (ring == NULL)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	ring->engine = engine;
-	list_add(&ring->link, &engine->buffers);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -2008,28 +1991,29 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 	 * of the buffer.
 	 */
 	ring->effective_size = size;
-	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+	if (IS_I830(engine->i915) || IS_845G(engine->i915))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	ring->last_retired_head = -1;
 	intel_ring_update_space(ring);
 
-	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
-				 engine->name, ret);
-		list_del(&ring->link);
+	vma = intel_ring_create_vma(engine->dev, size);
+	if (IS_ERR(vma)) {
 		kfree(ring);
-		return ERR_PTR(ret);
+		return ERR_CAST(vma);
 	}
+	ring->vma = vma;
+	if (HAS_LLC(engine->i915) && !vma->obj->stolen)
+		ring->vmap = true;
 
+	list_add(&ring->link, &engine->buffers);
 	return ring;
 }
 
 void
 intel_ring_free(struct intel_ring *ring)
 {
-	intel_destroy_ringbuffer_obj(ring);
+	__i915_gem_object_release_unless_active(ring->vma->obj);
 	list_del(&ring->link);
 	kfree(ring);
 }
@@ -2058,7 +2042,6 @@ static int intel_init_engine(struct drm_device *dev,
 		ret = PTR_ERR(ringbuf);
 		goto error;
 	}
-	engine->buffer = ringbuf;
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(engine);
@@ -2073,12 +2056,12 @@ static int intel_init_engine(struct drm_device *dev,
 
 	ret = intel_ring_map(ringbuf);
 	if (ret) {
-		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
-				engine->name, ret);
-		intel_destroy_ringbuffer_obj(ringbuf);
+		intel_ring_free(ringbuf);
 		goto error;
 	}
 
+	engine->buffer = ringbuf;
+
 	ret = i915_cmd_parser_init_ring(engine);
 	if (ret)
 		goto error;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d24d0e438f49..3ae941b338ca 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -95,7 +95,6 @@ struct intel_engine_hangcheck {
 };
 
 struct intel_ring {
-	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	void *virtual_start;
 
@@ -110,6 +109,7 @@ struct intel_ring {
 	int reserved_size;
 	int reserved_tail;
 	bool reserved_in_use;
+	bool vmap;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
-- 
2.7.0.rc3


