[Intel-gfx] [PATCH 26/34] drm/i915: Move object tracking lists to new mm

Ben Widawsky ben at bwidawsk.net
Sat May 25 21:27:00 CEST 2013


for file in `ls drivers/gpu/drm/i915/*.c` ; do sed -i "s/dev_priv->mm.bound_list/i915_gtt_vm->bound_list/" $file; done
for file in `ls drivers/gpu/drm/i915/*.c` ; do sed -i "s/dev_priv->mm.inactive_list/i915_gtt_vm->inactive_list/" $file; done
for file in `ls drivers/gpu/drm/i915/*.c` ; do sed -i "s/dev_priv->mm.active_list/i915_gtt_vm->active_list/" $file; done

I've also opted to pull the per-list comments out of line into a single
block so one can get a better picture of what the various lists do.
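
For illustration, here is roughly how list setup and traversal read after
the move. This is only a sketch, not part of the patch; it assumes (as set
up earlier in this series) that i915_gtt_vm names the global GTT's
struct i915_address_space:

	struct drm_i915_gem_object *obj;

	/* The lists now hang off the address space, not dev_priv->mm */
	INIT_LIST_HEAD(&i915_gtt_vm->active_list);
	INIT_LIST_HEAD(&i915_gtt_vm->inactive_list);
	INIT_LIST_HEAD(&i915_gtt_vm->bound_list);

	/* e.g. walking every bound object, as the resume path does */
	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}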

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_debugfs.c    | 20 ++++++++-------
 drivers/gpu/drm/i915/i915_drv.h        | 47 +++++++++++++++-------------------
 drivers/gpu/drm/i915/i915_gem.c        | 33 +++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_debug.c  |  2 +-
 drivers/gpu/drm/i915/i915_gem_evict.c  | 10 ++++----
 drivers/gpu/drm/i915/i915_gem_gtt.c    |  4 +--
 drivers/gpu/drm/i915/i915_gem_stolen.c |  4 +--
 drivers/gpu/drm/i915/i915_irq.c        | 12 ++++-----
 8 files changed, 62 insertions(+), 70 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7a182ce..cab68f6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -158,11 +158,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
-		head = &dev_priv->mm.active_list;
+		head = &i915_gtt_vm->active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
-		head = &dev_priv->mm.inactive_list;
+		head = &i915_gtt_vm->inactive_list;
 		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
@@ -215,17 +215,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   dev_priv->mm.object_memory);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.bound_list, gtt_list);
+	count_objects(&i915_gtt_vm->bound_list, gtt_list);
 	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.active_list, mm_list);
+	count_objects(&i915_gtt_vm->active_list, mm_list);
 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.inactive_list, mm_list);
+	count_objects(&i915_gtt_vm->inactive_list, mm_list);
 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -238,7 +238,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
 
 	size = count = mappable_size = mappable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
 		if (obj->fault_mappable) {
 			size += obj->gtt_space->size;
 			++count;
@@ -283,7 +283,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 		return ret;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
 		if (list == PINNED_LIST && obj->pin_count == 0)
 			continue;
 
@@ -1792,7 +1792,8 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_gem_retire_requests(dev);
 
 	if (val & DROP_BOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+		list_for_each_entry_safe(obj, next, &i915_gtt_vm->inactive_list,
+					 mm_list)
 			if (obj->pin_count == 0) {
 				ret = i915_gem_object_unbind(obj);
 				if (ret)
@@ -1801,7 +1802,8 @@ i915_drop_caches_set(void *data, u64 val)
 	}
 
 	if (val & DROP_UNBOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+					 gtt_list)
 			if (obj->pages_pin_count == 0) {
 				ret = i915_gem_object_put_pages(obj);
 				if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d9c9b7b..ccdf80d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -406,6 +406,25 @@ struct i915_address_space {
 	unsigned long start;		/* Start offset always 0 for dri2 */
 	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
 
+/* We use many types of lists for object tracking:
+ *  bound_list: all objects bound in this address space.
+ *	Used to restore mappings on resume.
+ *  active_list: List of objects currently involved in rendering.
+ *	Includes buffers having the contents of their GPU caches flushed, not
+ *	necessarily primitives. last_rendering_seqno represents when the
+ *	rendering involved will be completed. A reference is held on the buffer
+ *	while on this list.
+ *  inactive_list: LRU list of objects which are not in the ringbuffer
+ *	and are ready to unbind, but are still mapped.
+ *	last_rendering_seqno is 0 while an object is in this list.
+ *	A reference is not held on the buffer while on this list,
+ *	as merely being GTT-bound shouldn't prevent its being
+ *	freed, and we'll pull it off the list in the free path.
+ */
+	struct list_head bound_list;
+	struct list_head active_list;
+	struct list_head inactive_list;
+
 	struct {
 		dma_addr_t addr;
 		struct page *page;
@@ -777,11 +796,8 @@ struct intel_l3_parity {
 };
 
 struct i915_gem_mm {
-	/** List of all objects in gtt_space. Used to restore gtt
-	 * mappings on resume */
-	struct list_head bound_list;
 	/**
-	 * List of objects which are not bound to the GTT (thus
+	 * List of objects which are not bound to a VM (thus
 	 * are idle and not used by the GPU) but still have
 	 * (presumably uncached) pages still attached.
 	 */
@@ -790,29 +806,6 @@ struct i915_gem_mm {
 	struct shrinker inactive_shrinker;
 	bool shrinker_no_lock_stealing;
 
-	/**
-	 * List of objects currently involved in rendering.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives.  last_rendering_seqno
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head active_list;
-
-	/**
-	 * LRU list of objects which are not in the ringbuffer and
-	 * are ready to unbind, but are still in the GTT.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is not held on the buffer while on this list,
-	 * as merely being GTT-bound shouldn't prevent its being
-	 * freed, and we'll pull it off the list in the free path.
-	 */
-	struct list_head inactive_list;
-
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5d476a7..eed2ef6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -179,7 +179,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list)
 		if (obj->pin_count)
 			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
@@ -1709,7 +1709,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 	}
 
 	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
+				 &i915_gtt_vm->inactive_list,
 				 mm_list) {
 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_unbind(obj) == 0 &&
@@ -1736,7 +1736,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 
 	i915_gem_evict_everything(dev_priv->dev);
 
-	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+				 gtt_list)
 		i915_gem_object_put_pages(obj);
 }
 
@@ -1883,7 +1884,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 
 	/* Move from whatever list we were on to the tail of execution. */
-	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+	list_move_tail(&obj->mm_list, &i915_gtt_vm->active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_read_seqno = seqno;
@@ -1911,7 +1912,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);
 
-	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
 
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
@@ -2170,12 +2171,8 @@ void i915_gem_reset(struct drm_device *dev)
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
-	list_for_each_entry(obj,
-			    &dev_priv->mm.inactive_list,
-			    mm_list)
-	{
+	list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list)
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-	}
 
 	/* The fence registers are invalidated so clear them out */
 	i915_gem_reset_fences(dev);
@@ -3025,8 +3022,8 @@ search_free:
 		return ret;
 	}
 
-	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
-	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_move_tail(&obj->gtt_list, &i915_gtt_vm->bound_list);
+	list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
 
 	obj->gtt_space = node;
 	obj->gtt_offset = node->start;
@@ -3178,7 +3175,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 	/* And bump the LRU for this access */
 	if (i915_gem_object_is_inactive(obj))
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+		list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
 
 	return 0;
 }
@@ -4138,7 +4135,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&i915_gtt_vm->active_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -4199,10 +4196,10 @@ i915_gem_load(struct drm_device *dev)
 				  SLAB_HWCACHE_ALIGN,
 				  NULL);
 
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&i915_gtt_vm->active_list);
+	INIT_LIST_HEAD(&i915_gtt_vm->inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
-	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
+	INIT_LIST_HEAD(&i915_gtt_vm->bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
@@ -4498,7 +4495,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+	list_for_each_entry(obj, &i915_gtt_vm->inactive_list, gtt_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5..bf945a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
 		}
 	}
 
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+	list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
 		if (obj->base.dev != dev ||
 		    !atomic_read(&obj->base.refcount.refcount)) {
 			DRM_ERROR("freed inactive %p\n", obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 6e620f86..92856a2 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -86,7 +86,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 				 cache_level);
 
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list) {
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
@@ -95,7 +95,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
@@ -158,8 +158,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	bool lists_empty;
 	int ret;
 
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+	lists_empty = (list_empty(&i915_gtt_vm->inactive_list) &&
+		       list_empty(&i915_gtt_vm->active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -177,7 +177,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list, mm_list)
+				 &i915_gtt_vm->inactive_list, mm_list)
 		if (obj->pin_count == 0)
 			WARN_ON(i915_gem_object_unbind(obj));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a161213..c278f3c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -447,7 +447,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	if (dev_priv->gtt.aliasing_ppgtt)
 		gen6_write_pdes(dev_priv->gtt.aliasing_ppgtt);
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
@@ -656,7 +656,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 		i915_gtt_vm->mm.color_adjust = i915_gtt_color_adjust;
 
 	/* Mark any preallocated objects as occupied */
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
 		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
 			      obj->gtt_offset, obj->base.size);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d398333..7b25b2e 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -372,8 +372,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	obj->gtt_offset = gtt_offset;
 	obj->has_global_gtt_mapping = 1;
 
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
-	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_add_tail(&obj->gtt_list, &i915_gtt_vm->bound_list);
+	list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
 
 	return obj;
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f49db99..c78a999 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1682,7 +1682,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 	}
 
 	seqno = ring->get_seqno(ring, false);
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
 		if (obj->ring != ring)
 			continue;
 
@@ -1757,7 +1757,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
 	if (ring->id != RCS || !error->ccid)
 		return;
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
 		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
 			ering->ctx = i915_error_object_create_sized(dev_priv,
 								    obj, 1);
@@ -1891,10 +1891,10 @@ static void i915_capture_error_state(struct drm_device *dev)
 	error->pinned_bo = NULL;
 
 	i = 0;
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+	list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list)
 		i++;
 	error->active_bo_count = i;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list)
 		if (obj->pin_count)
 			i++;
 	error->pinned_bo_count = i - error->active_bo_count;
@@ -1913,13 +1913,13 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->active_bo_count =
 			capture_active_bo(error->active_bo,
 					  error->active_bo_count,
-					  &dev_priv->mm.active_list);
+					  &i915_gtt_vm->active_list);
 
 	if (error->pinned_bo)
 		error->pinned_bo_count =
 			capture_pinned_bo(error->pinned_bo,
 					  error->pinned_bo_count,
-					  &dev_priv->mm.bound_list);
+					  &i915_gtt_vm->bound_list);
 
 	do_gettimeofday(&error->time);
 
-- 
1.8.2.3



