[Intel-gfx] [PATCH 135/190] drm/i915: Move map-and-fenceable tracking to the VMA

Chris Wilson chris at chris-wilson.co.uk
Mon Jan 11 02:45:19 PST 2016


By moving map-and-fenceable tracking from the object to the VMA, we gain
fine-grained per-binding tracking and, in a subsequent patch, the ability
to track individual fences on the VMA.
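
As context for review: the gist is that the map_and_fenceable bit now lives
on the binding (in the i915_vma flags) and is recomputed from the VMA's own
node and size rather than from the object. The snippet below is a minimal
standalone model of that predicate, not driver code; the struct, function
and numbers (sketch_vma, sketch_set_map_and_fenceable, the fence and
aperture sizes) are illustrative assumptions only.

/*
 * Standalone sketch of the per-VMA map_and_fenceable computation.
 * Simplified types; not the i915 structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_vma {
	struct {
		uint64_t start;	/* offset of the binding in the GTT */
		uint64_t size;	/* size of the allocated drm_mm node */
	} node;
	/* per-binding state that previously lived on the object */
	bool map_and_fenceable : 1;
};

/*
 * Mirrors the check in __i915_vma_set_map_and_fenceable(): the binding is
 * "fenceable" when it exactly matches the fence size and alignment, and
 * "mappable" when it fits entirely below the CPU-visible aperture end.
 */
static void sketch_set_map_and_fenceable(struct sketch_vma *vma,
					 uint64_t fence_size,
					 uint64_t fence_alignment,
					 uint64_t mappable_end)
{
	bool fenceable = vma->node.size == fence_size &&
			 (vma->node.start & (fence_alignment - 1)) == 0;
	bool mappable = vma->node.start + fence_size <= mappable_end;

	vma->map_and_fenceable = fenceable && mappable;
}

int main(void)
{
	/* hypothetical numbers: 1MiB binding at 1MiB, 256MiB mappable aperture */
	struct sketch_vma vma = {
		.node = { .start = 1ull << 20, .size = 1ull << 20 },
	};

	sketch_set_map_and_fenceable(&vma, 1ull << 20, 1ull << 20, 256ull << 20);
	printf("map_and_fenceable = %d\n", vma.map_and_fenceable);
	return 0;
}

In the driver itself the fence size and alignment come from
i915_gem_get_gtt_size() / i915_gem_get_gtt_alignment(), now fed with
vma->size rather than obj->base.size, as the i915_gem.c hunk below shows.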

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 46 +++++++++++++-----------------
 drivers/gpu/drm/i915/i915_drv.h            |  6 ----
 drivers/gpu/drm/i915/i915_gem.c            | 31 +++++++++-----------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +-
 drivers/gpu/drm/i915/i915_gem_fence.c      |  5 +---
 drivers/gpu/drm/i915/i915_gem_gtt.h        |  6 ++++
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 +--
 drivers/gpu/drm/i915/intel_display.c       |  6 ++--
 8 files changed, 46 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e923dc192f54..418b80de5246 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -112,19 +112,6 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 	return i915_gem_object_to_ggtt(obj, NULL) ? "g" : " ";
 }
 
-static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
-{
-	u64 size = 0;
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
-			size += vma->node.size;
-	}
-
-	return size;
-}
-
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -309,17 +296,6 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-#define count_objects(list, member) do { \
-	list_for_each_entry(obj, list, member) { \
-		size += i915_gem_obj_total_ggtt_size(obj); \
-		++count; \
-		if (obj->map_and_fenceable) { \
-			mappable_size += obj->base.size; \
-			++mappable_count; \
-		} \
-	} \
-} while (0)
-
 struct file_stats {
 	struct drm_i915_file_private *file_priv;
 	unsigned long count;
@@ -404,7 +380,7 @@ static void print_batch_pool_stats(struct seq_file *m,
 	list_for_each_entry(vma, list, member) { \
 		size += vma->size; \
 		++count; \
-		if (vma->obj->map_and_fenceable) { \
+		if (vma->map_and_fenceable) { \
 			mappable_size += vma->size; \
 			++mappable_count; \
 		} \
@@ -433,7 +409,25 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   dev_priv->mm.object_memory);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.bound_list, global_list);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		bool allocated = false, mappable = false;
+
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
+			if (!vma->is_ggtt)
+				continue;
+
+			allocated = true;
+			size += vma->node.size;
+
+			if (vma->map_and_fenceable) {
+				mappable = true;
+				mappable_size += vma->node.size;
+			}
+		}
+
+		count += allocated;
+		mappable_count += mappable;
+	}
 	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 45a0da0947cd..cfc4430d3b50 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2078,12 +2078,6 @@ struct drm_i915_gem_object {
 	unsigned int fence_dirty:1;
 
 	/**
-	 * Is the object at the current location in the gtt mappable and
-	 * fenceable? Used to avoid costly recalculations.
-	 */
-	unsigned int map_and_fenceable:1;
-
-	/**
 	 * Whether the current gtt mapping needs to be mappable (and isn't just
 	 * mappable by accident). Track pin and fault separate for a more
 	 * accurate mappable working set.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d81821c6f9a1..0c4e8e1aeeff 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2691,13 +2691,15 @@ int i915_vma_unbind(struct i915_vma *vma)
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(obj->pages == NULL);
 
-	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+	if (vma->map_and_fenceable) {
 		i915_gem_object_finish_gtt(obj);
 
 		/* release the fence reg _after_ flushing */
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
 			return ret;
+
+		vma->map_and_fenceable = false;
 	}
 
 	if (likely(!vma->vm->closed)) {
@@ -2709,10 +2711,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	if (vma->is_ggtt) {
-		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
-			obj->map_and_fenceable = false;
-		} else if (vma->ggtt_view.pages) {
+	if (vma->ggtt_view.pages) {
+		if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
 			sg_free_table(vma->ggtt_view.pages);
 			kfree(vma->ggtt_view.pages);
 		}
@@ -3480,8 +3480,6 @@ i915_vma_misplaced(struct i915_vma *vma,
 		   uint64_t alignment,
 		   uint64_t flags)
 {
-	struct drm_i915_gem_object *obj = vma->obj;
-
 	if (!drm_mm_node_allocated(&vma->node))
 		return false;
 
@@ -3491,7 +3489,7 @@ i915_vma_misplaced(struct i915_vma *vma,
 	if (alignment && vma->node.start & (alignment - 1))
 		return true;
 
-	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+	if (flags & PIN_MAPPABLE && !vma->map_and_fenceable)
 		return true;
 
 	if (flags & PIN_OFFSET_BIAS &&
@@ -3511,13 +3509,10 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	bool mappable, fenceable;
 	u32 fence_size, fence_alignment;
 
-	fence_size = i915_gem_get_gtt_size(obj->base.dev,
-					   obj->base.size,
+	fence_size = i915_gem_get_gtt_size(obj->base.dev, vma->size,
 					   obj->tiling_mode);
-	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-						     obj->base.size,
-						     obj->tiling_mode,
-						     true);
+	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, vma->size,
+						     obj->tiling_mode, true);
 
 	fenceable = (vma->node.size == fence_size &&
 		     (vma->node.start & (fence_alignment - 1)) == 0);
@@ -3525,7 +3520,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	mappable = (vma->node.start + fence_size <=
 		    to_i915(obj->base.dev)->gtt.mappable_end);
 
-	obj->map_and_fenceable = mappable && fenceable;
+	vma->map_and_fenceable = mappable && fenceable;
 }
 
 int
@@ -3593,13 +3588,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
 		WARN(vma->pin_count,
 		     "bo is already pinned in ggtt with incorrect alignment:"
-		     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
-		     " obj->map_and_fenceable=%d\n",
+		     " offset=%08x %08x, req.alignment=%llx,"
+		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
 		     upper_32_bits(vma->node.start),
 		     lower_32_bits(vma->node.start),
 		     (long long)alignment,
 		     !!(flags & PIN_MAPPABLE),
-		     obj->map_and_fenceable);
+		     vma->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
 			return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 569be409c049..d13b7e507b3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -792,7 +792,6 @@ static bool
 eb_vma_misplaced(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	struct drm_i915_gem_object *obj = vma->obj;
 
 	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
 
@@ -812,7 +811,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 		return true;
 
 	/* avoid costly ping-pong once a batch bo ended up non-mappable */
-	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->map_and_fenceable)
 		return !only_mappable_for_reloc(entry->flags);
 
 	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 8ba05a0f15d2..e0f5fba22931 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -124,7 +124,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 		     (vma->node.start & (vma->node.size - 1)),
 		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08lx) aligned\n",
 		     (long)vma->node.start,
-		     obj->map_and_fenceable,
+		     vma->map_and_fenceable,
 		     (long)vma->node.size);
 
 		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
@@ -378,9 +378,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 			return 0;
 		}
 	} else if (enable) {
-		if (WARN_ON(!obj->map_and_fenceable))
-			return -EINVAL;
-
 		reg = i915_find_fence_reg(dev);
 		if (IS_ERR(reg))
 			return PTR_ERR(reg);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7f57dea246d8..6b0f557982d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -205,6 +205,12 @@ struct i915_vma {
 			unsigned int bound : 4;
 			unsigned int active : I915_NUM_RINGS;
 			bool is_ggtt : 1;
+			/**
+			 * Is the vma/object at the current location in the gtt
+			 * mappable and fenceable? Used to avoid costly
+			 * recalculations.
+			 */
+			bool map_and_fenceable : 1;
 			bool closed : 1;
 		};
 		unsigned int flags;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index f83cb4329c8d..7c2da8060757 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -130,7 +130,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 	if (vma == NULL)
 		return 0;
 
-	if (!obj->map_and_fenceable)
+	if (!vma->map_and_fenceable)
 		return 0;
 
 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
@@ -141,7 +141,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 			goto bad;
 	}
 
-	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+	size = i915_gem_get_gtt_size(obj->base.dev, vma->size, tiling_mode);
 	if (vma->node.size < size)
 		goto bad;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 313f1fb144b9..218bfd3c99fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2391,7 +2391,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	 * framebuffer compression.  For simplicity, we always install
 	 * a fence as the cost is not that onerous.
 	 */
-	if (view.type == I915_GGTT_VIEW_NORMAL) {
+	if (vma->map_and_fenceable) {
 		ret = i915_gem_object_get_fence(obj);
 		if (ret == -EDEADLK) {
 			/*
@@ -2430,11 +2430,11 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
 	intel_fill_fb_ggtt_view(&view, fb, state);
+	vma = i915_gem_object_to_ggtt(obj, &view);
 
-	if (view.type == I915_GGTT_VIEW_NORMAL)
+	if (vma->map_and_fenceable)
 		i915_gem_object_unpin_fence(obj);
 
-	vma = i915_gem_object_to_ggtt(obj, &view);
 	i915_gem_object_unpin_from_display_plane(vma);
 }
 
-- 
2.7.0.rc3


