[Intel-gfx] [PATCH 33/34] drm/i915: Create VMAs (part 2)

Ben Widawsky ben at bwidawsk.net
Sat May 25 21:27:07 CEST 2013


Remove the obj->gtt_space field and verify that everything still works.
This validates that what we did in part 1 was correct.

BISECT WARNING: This patch was not meant to be bisected. If it does end
up upstream, it should be squashed into the 3-part series for creating
the VMA.

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h        | 16 +++++++++----
 drivers/gpu/drm/i915/i915_gem.c        | 42 ++++++++++++----------------------
 drivers/gpu/drm/i915/i915_gem_evict.c  | 16 +++++++------
 drivers/gpu/drm/i915/i915_gem_gtt.c    | 11 +++------
 drivers/gpu/drm/i915/i915_gem_stolen.c | 18 ++-------------
 5 files changed, 39 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 324ab0f..19f35f4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1160,7 +1160,6 @@ struct drm_i915_gem_object {
 
 	const struct drm_i915_gem_object_ops *ops;
 
-	struct drm_mm_node *gtt_space;
 	struct list_head vma_list;
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
@@ -1290,25 +1289,32 @@ struct drm_i915_gem_object {
 
 static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o)
 {
+	struct i915_vma *vma;
 	BUG_ON(list_empty(&o->vma_list));
-	return o->gtt_space->start;
+	vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
+	return vma->node.start;
 }
 
 static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o)
 {
-	return o->gtt_space != NULL;
+	return !list_empty(&o->vma_list);
 }
 
 static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o)
 {
+	struct i915_vma *vma;
 	BUG_ON(list_empty(&o->vma_list));
-	return o->gtt_space->size;
+	vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
+	return vma->node.size;
 }
 
 static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
 					  enum i915_cache_level color)
 {
-	o->gtt_space->color = color;
+	struct i915_vma *vma;
+	BUG_ON(list_empty(&o->vma_list));
+	vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
+	vma->node.color = color;
 }
 
 /* This is a temporary define to help transition us to real VMAs. If you see
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d82863c..9b65faf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2518,12 +2518,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
 	vma = __i915_obj_to_vma(obj);
 	list_del_init(&vma->vma_link);
-//	drm_mm_remove_node(&vma->node);
+	drm_mm_remove_node(&vma->node);
 	i915_gem_vma_destroy(vma);
 
-	drm_mm_put_block(obj->gtt_space);
-	obj->gtt_space = NULL;
-
 	return 0;
 }
 
@@ -2947,7 +2944,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	size_t max = map_and_fenceable ?
@@ -2993,20 +2989,14 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL) {
-		i915_gem_object_unpin_pages(obj);
-		return -ENOMEM;
-	}
 	vma = i915_gem_vma_create(obj);
 	if (vma == NULL) {
-		kfree(node);
 		i915_gem_object_unpin_pages(obj);
 		return -ENOMEM;
 	}
 
 search_free:
-	ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, node,
+	ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, &vma->node,
 						  size, alignment,
 						  obj->cache_level, 0, max,
 						  DRM_MM_CREATE_DEFAULT,
@@ -3020,36 +3010,34 @@ search_free:
 			goto search_free;
 
 		i915_gem_object_unpin_pages(obj);
-		kfree(node);
+		i915_gem_vma_destroy(vma);
 		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+					      obj->cache_level))) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(node);
+		drm_mm_remove_node(&vma->node);
+		i915_gem_vma_destroy(vma);
 		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(node);
+		drm_mm_remove_node(&vma->node);
+		i915_gem_vma_destroy(vma);
 		return ret;
 	}
 
 	list_move_tail(&obj->gtt_list, &i915_gtt_vm->bound_list);
 	list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
-
-	obj->gtt_space = node;
-	vma->node.start = node->start;
-	vma->node.size = node->size;
 	list_add(&vma->vma_link, &obj->vma_list);
 
-	fenceable =
-		node->size == fence_size &&
-		(node->start & (fence_alignment - 1)) == 0;
+	fenceable = i915_gem_obj_size(obj) == fence_size &&
+		(i915_gem_obj_offset(obj) & (fence_alignment - 1)) == 0;
 
 	mappable =
-		node->start + obj->base.size <= dev_priv->gtt.mappable_end;
+		vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
 
@@ -3212,10 +3200,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		return -EBUSY;
 	}
 
-	if (i915_gem_obj_bound(obj)) {
-		node = obj->gtt_space;
-		BUG_ON(node->start != __i915_obj_to_vma(obj)->node.start);
-	}
+	if (i915_gem_obj_bound(obj))
+		node = &__i915_obj_to_vma(obj)->node;
 
 	if (!i915_gem_valid_gtt_space(dev, node, cache_level)) {
 		ret = i915_gem_object_unbind(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 44f2b99..a9ffc2c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,12 +34,13 @@
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
+	struct i915_vma *vma = __i915_obj_to_vma(obj);
+
 	if (obj->pin_count)
 		return false;
 
 	list_add(&obj->exec_list, unwind);
-	BUG_ON(__i915_obj_to_vma(obj)->node.start != i915_gem_obj_offset(obj));
-	return drm_mm_scan_add_block(obj->gtt_space);
+	return drm_mm_scan_add_block(&vma->node);
 }
 
 int
@@ -50,6 +51,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -108,9 +110,8 @@ none:
 				       struct drm_i915_gem_object,
 				       exec_list);
 
-
-		BUG_ON(__i915_obj_to_vma(obj)->node.start != i915_gem_obj_offset(obj));
-		ret = drm_mm_scan_remove_block(obj->gtt_space);
+		vma = __i915_obj_to_vma(obj);
+		ret = drm_mm_scan_remove_block(&vma->node);
 		BUG_ON(ret);
 
 		list_del_init(&obj->exec_list);
@@ -127,11 +128,12 @@ found:
 	 * temporary list. */
 	INIT_LIST_HEAD(&eviction_list);
 	while (!list_empty(&unwind_list)) {
+		struct i915_vma *vma;
 		obj = list_first_entry(&unwind_list,
 				       struct drm_i915_gem_object,
 				       exec_list);
-		BUG_ON(__i915_obj_to_vma(obj)->node.start != i915_gem_obj_offset(obj));
-		if (drm_mm_scan_remove_block(obj->gtt_space)) {
+		vma = __i915_obj_to_vma(obj);
+		if (drm_mm_scan_remove_block(&vma->node)) {
 			list_move(&obj->exec_list, &eviction_list);
 			drm_gem_object_reference(&obj->base);
 			continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2e97361..ff3c3cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -657,21 +657,16 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
-		uintptr_t gtt_offset = (uintptr_t)obj->gtt_space;
+		struct i915_vma *vma = __i915_obj_to_vma(obj);
+		uintptr_t gtt_offset = (uintptr_t)vma->deferred_offset;
 		int ret;
 		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
 			      i915_gem_obj_offset(obj), obj->base.size);
 
 		BUG_ON((gtt_offset & I915_GTT_RESERVED) == 0);
-		BUG_ON((__i915_obj_to_vma(obj)->deferred_offset & I915_GTT_RESERVED) == 0);
 		gtt_offset = gtt_offset & ~I915_GTT_RESERVED;
-		obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
-		if (!obj->gtt_space) {
-			DRM_ERROR("Failed to preserve all objects\n");
-			break;
-		}
 		ret = drm_mm_create_block(&i915_gtt_vm->mm,
-					  obj->gtt_space,
+					  &vma->node,
 					  gtt_offset,
 					  obj->base.size);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f057b7c..2418963 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -369,31 +369,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	 * later.
 	 */
 	if (drm_mm_initialized(&i915_gtt_vm->mm)) {
-		obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
-		if (!obj->gtt_space) {
-			i915_gem_vma_destroy(vma);
-			drm_gem_object_unreference(&obj->base);
-			return NULL;
-		}
-		ret = drm_mm_create_block(&i915_gtt_vm->mm, obj->gtt_space,
+		ret = drm_mm_create_block(&i915_gtt_vm->mm, &vma->node,
 					  gtt_offset, size);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
 			i915_gem_vma_destroy(vma);
 			drm_gem_object_unreference(&obj->base);
-			kfree(obj->gtt_space);
 			return NULL;
 		}
-		vma->node.start = obj->gtt_space->start;
-		vma->node.size = obj->gtt_space->size;
-		obj->gtt_space->start = gtt_offset;
 		list_add(&vma->vma_link, &obj->vma_list);
-	} else {
-		/* NB: Safe because we assert page alignment */
-		obj->gtt_space = (struct drm_mm_node *)
-			((uintptr_t)gtt_offset | I915_GTT_RESERVED);
+	} else
 		vma->deferred_offset = gtt_offset | I915_GTT_RESERVED;
-	}
 
 	obj->has_global_gtt_mapping = 1;
 
-- 
1.8.2.3




More information about the Intel-gfx mailing list