[Intel-gfx] [PATCH 1/3] drm/i915: Add bind/unbind object functions to VM

Ben Widawsky ben at bwidawsk.net
Sat Jul 13 06:45:54 CEST 2013


As we plumb the code with more VM information, it has become increasingly
obvious that the easiest way to deal with bind and unbind is to simply
put the function pointers in the VM and let them choose the correct way
to handle the page table updates. This change allows many places in the
code to simply call vm->bind_object, without having to worry about
distinguishing PPGTT from GGTT.
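
For illustration only (not part of this patch), a minimal caller sketch of
what the indirection buys us; the bind_to_vm()/unbind_from_vm() helpers
below are hypothetical and not introduced anywhere in this series:

	/* Hypothetical caller: no GGTT vs. PPGTT branching, just use the
	 * hooks the address space provides.
	 */
	static void bind_to_vm(struct i915_address_space *vm,
			       struct drm_i915_gem_object *obj,
			       enum i915_cache_level cache_level)
	{
		vm->bind_object(vm, obj, cache_level);
	}

	static void unbind_from_vm(struct i915_address_space *vm,
				   struct drm_i915_gem_object *obj)
	{
		vm->unbind_object(vm, obj);
	}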

NOTE: At some point in the future, bringing back insert_entries may in
fact be desirable in order to use one bind/unbind for multiple generations
of PPGTT. For now, however, it's just not necessary.
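
A rough sketch of that possible future direction (again, not part of this
patch): a single generic bind implementation could delegate to the
per-generation insert_entries hook that already lives in the VM:

	/* Hypothetical shared implementation, assuming every PPGTT
	 * generation keeps providing insert_entries with the signature
	 * shown in i915_address_space below.
	 */
	static void ppgtt_bind_object(struct i915_address_space *vm,
				      struct drm_i915_gem_object *obj,
				      enum i915_cache_level cache_level)
	{
		const unsigned long entry = i915_gem_obj_offset(obj, vm);

		vm->insert_entries(vm, obj->pages, entry >> PAGE_SHIFT,
				   cache_level);
	}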

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h     |  9 +++++
 drivers/gpu/drm/i915/i915_gem_gtt.c | 72 +++++++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e6694ae..c2a9c98 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -484,9 +484,18 @@ struct i915_address_space {
 	/* FIXME: Need a more generic return type */
 	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
 				     enum i915_cache_level level);
+
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_object)(struct i915_address_space *vm,
+			      struct drm_i915_gem_object *obj);
 	void (*clear_range)(struct i915_address_space *vm,
 			    unsigned int first_entry,
 			    unsigned int num_entries);
+	/* Map an object into an address space with the given cache flags. */
+	void (*bind_object)(struct i915_address_space *vm,
+			    struct drm_i915_gem_object *obj,
+			    enum i915_cache_level cache_level);
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
 			       unsigned int first_entry,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c0d0223..31ff971 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -45,6 +45,12 @@
 #define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
 
+static void gen6_ppgtt_bind_object(struct i915_address_space *vm,
+				   struct drm_i915_gem_object *obj,
+				   enum i915_cache_level cache_level);
+static void gen6_ppgtt_unbind_object(struct i915_address_space *vm,
+				     struct drm_i915_gem_object *obj);
+
 static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
 				      enum i915_cache_level level)
 {
@@ -285,7 +291,9 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	}
 	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
 	ppgtt->enable = gen6_ppgtt_enable;
+	ppgtt->base.unbind_object = gen6_ppgtt_unbind_object;
 	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+	ppgtt->base.bind_object = gen6_ppgtt_bind_object;
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
@@ -397,6 +405,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			   cache_level);
 }
 
+static void gen6_ppgtt_bind_object(struct i915_address_space *vm,
+				   struct drm_i915_gem_object *obj,
+				   enum i915_cache_level cache_level)
+{
+	const unsigned long entry = i915_gem_obj_offset(obj, vm);
+
+	gen6_ppgtt_insert_entries(vm, obj->pages, entry >> PAGE_SHIFT,
+				  cache_level);
+	obj->has_aliasing_ppgtt_mapping = 1;
+}
+
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			      struct drm_i915_gem_object *obj)
 {
@@ -407,6 +426,16 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			obj->base.size >> PAGE_SHIFT);
 }
 
+static void gen6_ppgtt_unbind_object(struct i915_address_space *vm,
+				     struct drm_i915_gem_object *obj)
+{
+	const unsigned long entry = i915_gem_obj_offset(obj, vm);
+
+	gen6_ppgtt_clear_range(vm, entry >> PAGE_SHIFT,
+			       obj->base.size >> PAGE_SHIFT);
+	obj->has_aliasing_ppgtt_mapping = 0;
+}
+
 extern int intel_iommu_gfx_mapped;
 /* Certain Gen5 chipsets require require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
@@ -555,6 +584,18 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 
 }
 
+static void i915_ggtt_bind_object(struct i915_address_space *vm,
+				  struct drm_i915_gem_object *obj,
+				  enum i915_cache_level cache_level)
+{
+	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+	BUG_ON(!i915_is_ggtt(vm));
+	intel_gtt_insert_sg_entries(obj->pages, entry, flags);
+}
+
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
 				  unsigned int first_entry,
 				  unsigned int num_entries)
@@ -562,6 +603,24 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
 	intel_gtt_clear_range(first_entry, num_entries);
 }
 
+static void i915_ggtt_unbind_object(struct i915_address_space *vm,
+				    struct drm_i915_gem_object *obj)
+{
+	const unsigned int first = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+	const unsigned int size = obj->base.size >> PAGE_SHIFT;
+
+	BUG_ON(!i915_is_ggtt(vm));
+	intel_gtt_clear_range(first, size);
+}
+
+static void gen6_ggtt_bind_object(struct i915_address_space *vm,
+				  struct drm_i915_gem_object *obj,
+				  enum i915_cache_level cache_level)
+{
+	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+	gen6_ggtt_insert_entries(vm, obj->pages, entry, cache_level);
+	obj->has_global_gtt_mapping = 1;
+}
 
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
@@ -590,6 +649,15 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 	obj->has_global_gtt_mapping = 0;
 }
 
+static void gen6_ggtt_unbind_object(struct i915_address_space *vm,
+				    struct drm_i915_gem_object *obj)
+{
+	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+
+	gen6_ggtt_clear_range(vm, entry, obj->base.size >> PAGE_SHIFT);
+	obj->has_global_gtt_mapping = 0;
+}
+
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -823,7 +891,9 @@ static int gen6_gmch_probe(struct drm_device *dev,
 		DRM_ERROR("Scratch setup failed\n");
 
 	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+	dev_priv->gtt.base.unbind_object = gen6_ggtt_unbind_object;
 	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_object = gen6_ggtt_bind_object;
 
 	return ret;
 }
@@ -855,7 +925,9 @@ static int i915_gmch_probe(struct drm_device *dev,
 
 	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
 	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+	dev_priv->gtt.base.unbind_object = i915_ggtt_unbind_object;
 	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_object = i915_ggtt_bind_object;
 
 	return 0;
 }
-- 
1.8.3.2



