[PATCH] drm/i915/ttm: Rework object initialization

Thomas Hellström thomas.hellstrom at linux.intel.com
Sun Sep 26 10:28:20 UTC 2021


Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 51 +++++++++++++------------
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e0c8692cedcc..284f9397b4fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -948,12 +948,8 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
  */
 static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
 {
-	if (obj->ttm.created) {
-		ttm_bo_put(i915_gem_to_ttm(obj));
-	} else {
-		__i915_gem_free_object(obj);
-		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-	}
+	BUG_ON(!obj->ttm.created);
+	ttm_bo_put(i915_gem_to_ttm(obj));
 }
 
 static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -1033,16 +1029,21 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
-	i915_ttm_backup_free(obj);
+	if (obj->ttm.created) {
+		i915_ttm_backup_free(obj);
 
-	/* This releases all gem object bindings to the backend. */
-	__i915_gem_free_object(obj);
+		/* This releases all gem object bindings to the backend. */
+		__i915_gem_free_object(obj);
 
-	i915_gem_object_release_memory_region(obj);
-	mutex_destroy(&obj->ttm.get_io_page.lock);
+		i915_gem_object_release_memory_region(obj);
+		mutex_destroy(&obj->ttm.get_io_page.lock);
 
-	if (obj->ttm.created)
-		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+		if (obj->ttm.created)
+			call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+	} else {
+		/* Caller will free object and GEM placements. */
+		dma_resv_fini(&obj->base._resv);
+	}
 }
 
 /**
@@ -1069,26 +1070,17 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	enum ttm_bo_type bo_type;
 	int ret;
 
+	/* Forcing the page size is kernel internal only */
+	GEM_BUG_ON(page_size && obj->mm.n_placements);
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
-	i915_gem_object_init_memory_region(obj, mem);
-	i915_gem_object_make_unshrinkable(obj);
-	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
-	mutex_init(&obj->ttm.get_io_page.lock);
+	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
 		ttm_bo_type_kernel;
 
-	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
-
-	/* Forcing the page size is kernel internal only */
-	GEM_BUG_ON(page_size && obj->mm.n_placements);
-
 	/*
 	 * If this function fails, it will call the destructor, but
 	 * our caller still owns the object. So no freeing in the
 	 * destructor until obj->ttm.created is true.
-	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
-	 * until successful initialization.
 	 */
 	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
 				   bo_type, &i915_sys_placement,
@@ -1097,7 +1089,16 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	if (ret)
 		return i915_ttm_err_to_gem(ret);
 
+	/*
+	 * The object is now present on the TTM LRU lists.
+	 * Keep the object locked until all initialization is done.
+	 */
 	obj->ttm.created = true;
+	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+	i915_gem_object_init_memory_region(obj, mem);
+	i915_gem_object_make_unshrinkable(obj);
+	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
+	mutex_init(&obj->ttm.get_io_page.lock);
 	i915_ttm_adjust_domains_after_move(obj);
 	i915_ttm_adjust_gem_after_move(obj);
 	i915_gem_object_unlock(obj);
-- 
2.31.1



More information about the Intel-gfx-trybot mailing list