[Intel-gfx] [PATCH 2/3] drm/i915: Use the new vm [un]bind functions

Ben Widawsky ben at bwidawsk.net
Sat Jul 13 06:45:55 CEST 2013


Building on the previous patch, which created the new function pointers in
the VM for bind/unbind, here we actually put those new function pointers
to use.

Split out as a separate patch to aid in review. I'm fine with squashing
into the previous patch if people request it.

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            | 10 -----
 drivers/gpu/drm/i915/i915_gem.c            | 36 +++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c    |  6 ++-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19 ++++----
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 70 +++++++++---------------------
 5 files changed, 52 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c2a9c98..8f9569b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1923,18 +1923,8 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 
 /* i915_gem_gtt.c */
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj);
-
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-/* FIXME: this is never okay with full PPGTT */
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-				enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90d49fb..8e7a12d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2655,12 +2655,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 
 	trace_i915_gem_object_unbind(obj, vm);
 
-	if (obj->has_global_gtt_mapping && i915_is_ggtt(vm))
-		i915_gem_gtt_unbind_object(obj);
-	if (obj->has_aliasing_ppgtt_mapping) {
-		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-		obj->has_aliasing_ppgtt_mapping = 0;
-	}
+
+	vm->unbind_object(vm, obj);
+
 	i915_gem_gtt_finish_object(obj);
 	i915_gem_object_unpin_pages(obj);
 
@@ -3393,7 +3390,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
 	int ret;
 
@@ -3428,13 +3424,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				return ret;
 		}
 
-		if (obj->has_global_gtt_mapping)
-			i915_gem_gtt_bind_object(obj, cache_level);
-		if (obj->has_aliasing_ppgtt_mapping)
-			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-					       obj, cache_level);
-
-		i915_gem_obj_set_color(obj, vma->vm, cache_level);
+		vm->bind_object(vm, obj, cache_level);
+		i915_gem_obj_set_color(obj, vm, cache_level);
 	}
 
 	if (cache_level == I915_CACHE_NONE) {
@@ -3716,6 +3707,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    bool map_and_fenceable,
 		    bool nonblocking)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
 	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -3741,20 +3733,24 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (!i915_gem_obj_bound(obj, vm)) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 		ret = i915_gem_object_bind_to_gtt(obj, vm, alignment,
 						  map_and_fenceable,
 						  nonblocking);
 		if (ret)
 			return ret;
 
-		if (!dev_priv->mm.aliasing_ppgtt)
-			i915_gem_gtt_bind_object(obj, obj->cache_level);
+		if (!dev_priv->mm.aliasing_ppgtt) {
+			dev_priv->gtt.base.bind_object(&dev_priv->gtt.base,
+						       obj,
+						       obj->cache_level);
+		}
 	}
 
-	if (!obj->has_global_gtt_mapping && map_and_fenceable)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+	if (!obj->has_global_gtt_mapping && map_and_fenceable) {
+		dev_priv->gtt.base.bind_object(&dev_priv->gtt.base,
+					       obj,
+					       obj->cache_level);
+	}
 
 	obj->pin_count++;
 	obj->pin_mappable |= map_and_fenceable;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c92fd81..177e42c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -424,8 +424,10 @@ static int do_switch(struct i915_hw_context *to)
 		return ret;
 	}
 
-	if (!to->obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+	if (!to->obj->has_global_gtt_mapping) {
+		dev_priv->gtt.base.bind_object(&dev_priv->gtt.base,
+					       to->obj, to->obj->cache_level);
+	}
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 725dd7f..9e9d955 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -197,8 +197,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (unlikely(IS_GEN6(dev) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 	    !target_i915_obj->has_global_gtt_mapping)) {
-		i915_gem_gtt_bind_object(target_i915_obj,
-					 target_i915_obj->cache_level);
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		dev_priv->gtt.base.bind_object(&dev_priv->gtt.base,
+					       target_i915_obj,
+					       target_i915_obj->cache_level);
 	}
 
 	/* Validate that the target is in a valid r/w GPU domain */
@@ -438,10 +440,9 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 
 	/* Ensure ppgtt mapping exists if needed */
 	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-				       obj, obj->cache_level);
-
-		obj->has_aliasing_ppgtt_mapping = 1;
+		struct i915_address_space *appgtt;
+		appgtt = &dev_priv->mm.aliasing_ppgtt->base;
+		appgtt->bind_object(appgtt, obj, obj->cache_level);
 	}
 
 	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
@@ -456,7 +457,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
 	    !obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+		dev_priv->gtt.base.bind_object(&dev_priv->gtt.base,
+					       obj, obj->cache_level);
 
 	return 0;
 }
@@ -1046,7 +1048,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * hsw should have this fixed, but let's be paranoid and do it
 	 * unconditionally for now. */
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+		dev_priv->gtt.base.bind_object(&dev_priv->gtt.base, batch_obj,
+					       batch_obj->cache_level);
 
 	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 31ff971..31bffb9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -393,18 +393,6 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 	dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level)
-{
-	struct i915_address_space *vm = &ppgtt->base;
-	unsigned long obj_offset = i915_gem_obj_offset(obj, vm);
-
-	vm->insert_entries(vm, obj->pages,
-			   obj_offset >> PAGE_SHIFT,
-			   cache_level);
-}
-
 static void gen6_ppgtt_bind_object(struct i915_address_space *vm,
 				   struct drm_i915_gem_object *obj,
 				   enum i915_cache_level cache_level)
@@ -416,16 +404,6 @@ static void gen6_ppgtt_bind_object(struct i915_address_space *vm,
 	obj->has_aliasing_ppgtt_mapping = 1;
 }
 
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj)
-{
-	struct i915_address_space *vm = &ppgtt->base;
-	unsigned long obj_offset = i915_gem_obj_offset(obj, vm);
-
-	vm->clear_range(vm, obj_offset >> PAGE_SHIFT,
-			obj->base.size >> PAGE_SHIFT);
-}
-
 static void gen6_ppgtt_unbind_object(struct i915_address_space *vm,
 				     struct drm_i915_gem_object *obj)
 {
@@ -489,7 +467,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		i915_gem_clflush_object(obj);
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+		dev_priv->gtt.base.bind_object(&dev_priv->gtt.base,
+					       obj, obj->cache_level);
 	}
 
 	i915_gem_chipset_flush(dev);
@@ -620,33 +599,16 @@ static void gen6_ggtt_bind_object(struct i915_address_space *vm,
 	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 	gen6_ggtt_insert_entries(vm, obj->pages, entry, cache_level);
 	obj->has_global_gtt_mapping = 1;
-}
-
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-			      enum i915_cache_level cache_level)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
-					  entry,
-					  cache_level);
 
-	obj->has_global_gtt_mapping = 1;
-}
-
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       entry,
-				       obj->base.size >> PAGE_SHIFT);
-
-	obj->has_global_gtt_mapping = 0;
+	/* GGTT bound buffers are special cases with aliasing PPGTT. Assume we
+	 * always want to do both */
+	if (obj->has_aliasing_ppgtt_mapping) {
+		struct drm_device *dev = obj->base.dev;
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		struct i915_address_space *appgtt;
+		appgtt = &dev_priv->mm.aliasing_ppgtt->base;
+		appgtt->bind_object(appgtt, obj, cache_level);
+	}
 }
 
 static void gen6_ggtt_unbind_object(struct i915_address_space *vm,
@@ -656,6 +618,16 @@ static void gen6_ggtt_unbind_object(struct i915_address_space *vm,
 
 	gen6_ggtt_clear_range(vm, entry, obj->base.size >> PAGE_SHIFT);
 	obj->has_global_gtt_mapping = 0;
+
+	/* GGTT bound buffers are special cases with aliasing PPGTT. Assume we
+	 * always want to do both */
+	if (obj->has_aliasing_ppgtt_mapping) {
+		struct drm_device *dev = obj->base.dev;
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		struct i915_address_space *appgtt;
+		appgtt = &dev_priv->mm.aliasing_ppgtt->base;
+		appgtt->unbind_object(appgtt, obj);
+	}
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
-- 
1.8.3.2




More information about the Intel-gfx mailing list