[Intel-gfx] [PATCH 078/190] drm/i915: Split early global GTT initialisation

Chris Wilson chris at chris-wilson.co.uk
Mon Jan 11 01:17:29 PST 2016


Initialising the global GTT is tricky as we wish to use the drm_mm range
manager during the modesetting initialisation (to capture stolen
allocations from the BIOS) before we actually enable GEM. To overcome
this, we currently set up the drm_mm first and then carefully rebind
the preallocated (stolen) objects afterwards.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
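As an illustration of the resulting two-phase flow, here is a minimal,
self-contained toy sketch (hypothetical names, not i915 code): the early
init only brings up the range manager so stolen reservations can be made
during modeset bring-up, and the later setup scrubs whatever was not
reserved, mirroring the i915_gem_init_global_gtt() /
i915_global_gtt_setup() split in the diff below.

    /*
     * Toy model of the two-phase initialisation (hypothetical names).
     * Phase 1 only makes the range manager usable so "stolen" ranges can
     * be reserved before GEM is up; phase 2 clears the remaining holes.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_ggtt {
            unsigned long total;      /* size of the toy address space */
            unsigned long reserved;   /* bytes claimed during phase 1 */
            bool mm_ready;            /* phase 1 done: reservations allowed */
    };

    /* Phase 1: driver load, before GEM init (cf. i915_gem_init_global_gtt). */
    static void toy_ggtt_early_init(struct toy_ggtt *ggtt, unsigned long total)
    {
            ggtt->total = total;
            ggtt->mm_ready = true;
    }

    /* Reservations (e.g. the BIOS framebuffer) only need phase 1. */
    static int toy_ggtt_reserve(struct toy_ggtt *ggtt, unsigned long size)
    {
            if (!ggtt->mm_ready || ggtt->reserved + size > ggtt->total)
                    return -1;
            ggtt->reserved += size;
            return 0;
    }

    /* Phase 2: GEM init (cf. i915_global_gtt_setup): clear unused space. */
    static void toy_ggtt_late_setup(struct toy_ggtt *ggtt)
    {
            printf("clearing %lu unreserved bytes\n",
                   ggtt->total - ggtt->reserved);
    }

    int main(void)
    {
            struct toy_ggtt ggtt = { 0 };

            toy_ggtt_early_init(&ggtt, 1ul << 20); /* 1 MiB toy aperture */
            toy_ggtt_reserve(&ggtt, 0x1000);       /* a "stolen" allocation */
            toy_ggtt_late_setup(&ggtt);
            return 0;
    }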
 drivers/gpu/drm/i915/i915_dma.c        |  2 ++
 drivers/gpu/drm/i915/i915_gem.c        |  5 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c    | 62 +++++++++++-----------------------
 drivers/gpu/drm/i915/i915_gem_gtt.h    |  1 +
 drivers/gpu/drm/i915/i915_gem_stolen.c | 17 +++++-----
 5 files changed, 33 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c0242ce45e43..4a24831a14fa 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -989,6 +989,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
 					      aperture_size);
 
+	i915_gem_init_global_gtt(dev);
+
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
 	 * by the GPU. i915_gem_retire_requests() is called directly when we
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e4d7c7f5aca2..44bd514a6c2e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4237,7 +4237,9 @@ int i915_gem_init(struct drm_device *dev)
 	if (ret)
 		goto out_unlock;
 
-	i915_gem_init_global_gtt(dev);
+	ret = i915_global_gtt_setup(dev);
+	if (ret)
+		goto out_unlock;
 
 	ret = i915_gem_context_init(dev);
 	if (ret)
@@ -4312,7 +4314,6 @@ i915_gem_load(struct drm_device *dev)
 				  SLAB_HWCACHE_ALIGN,
 				  NULL);
 
-	INIT_LIST_HEAD(&dev_priv->vm_list);
 	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6168182a87d8..b5c3bbe6dc2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2681,10 +2681,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
 	}
 }
 
-static int i915_gem_setup_global_gtt(struct drm_device *dev,
-				     u64 start,
-				     u64 mappable_end,
-				     u64 end)
+int i915_global_gtt_setup(struct drm_device *dev)
 {
 	/* Let GEM Manage all of the aperture.
 	 *
@@ -2697,48 +2694,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	 */
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
-	struct drm_mm_node *entry;
-	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
+	struct drm_mm_node *entry;
 	int ret;
 
-	BUG_ON(mappable_end > end);
-
-	ggtt_vm->start = start;
-
-	/* Subtract the guard page before address space initialization to
-	 * shrink the range used by drm_mm */
-	ggtt_vm->total = end - start - PAGE_SIZE;
-	i915_address_space_init(ggtt_vm, dev_priv);
-	ggtt_vm->total += PAGE_SIZE;
-
 	if (intel_vgpu_active(dev)) {
 		ret = intel_vgt_balloon(dev);
 		if (ret)
 			return ret;
 	}
 
-	if (!HAS_LLC(dev))
-		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
-
-	/* Mark any preallocated objects as occupied */
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
-
-		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
-			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
-
-		WARN_ON(i915_gem_obj_ggtt_bound(obj));
-		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
-		if (ret) {
-			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
-			return ret;
-		}
-		vma->bound |= GLOBAL_BIND;
-		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
-	}
-
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -2748,7 +2713,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+	ggtt_vm->clear_range(ggtt_vm,
+			     ggtt_vm->total - PAGE_SIZE, PAGE_SIZE,
+			     true);
 
 	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
 		struct i915_hw_ppgtt *ppgtt;
@@ -2788,13 +2755,22 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 
 void i915_gem_init_global_gtt(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 gtt_size, mappable_size;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
 
-	gtt_size = dev_priv->gtt.base.total;
-	mappable_size = dev_priv->gtt.mappable_end;
+	INIT_LIST_HEAD(&dev_priv->vm_list);
 
-	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+	if (WARN_ON(dev_priv->gtt.mappable_end > ggtt_vm->total))
+		dev_priv->gtt.mappable_end = ggtt_vm->total;
+
+	if (!HAS_LLC(dev))
+		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+
+	/* Subtract the guard page before address space initialization to
+	 * shrink the range used by drm_mm */
+	ggtt_vm->total -= PAGE_SIZE;
+	i915_address_space_init(ggtt_vm, dev_priv);
+	ggtt_vm->total += PAGE_SIZE;
 }
 
 void i915_global_gtt_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 2497671d1e1a..cb796c1ff6a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -514,6 +514,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
 
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
+int i915_global_gtt_setup(struct drm_device *dev);
 void i915_global_gtt_cleanup(struct drm_device *dev);
 
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 590e635cb65c..463be259a505 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -683,18 +683,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	 */
 	vma->node.start = gtt_offset;
 	vma->node.size = size;
-	if (drm_mm_initialized(&ggtt->mm)) {
-		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
-		if (ret) {
-			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-			goto err;
-		}
 
-		vma->bound |= GLOBAL_BIND;
-		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+	ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+	if (ret) {
+		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+		goto err;
 	}
 
+	vma->bound |= GLOBAL_BIND;
+	__i915_vma_set_map_and_fenceable(vma);
+	list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	i915_gem_object_pin_pages(obj);
 
-- 
2.7.0.rc3
