[PATCH] drm/i915/ttm: Rework object initialization
Thomas Hellström
thomas.hellstrom at linux.intel.com
Sun Sep 26 20:38:41 UTC 2021
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 56 ++++++++++++++-----------
1 file changed, 31 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index b94497989995..a711b9e60545 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -813,12 +813,8 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
*/
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
- if (obj->ttm.created) {
- ttm_bo_put(i915_gem_to_ttm(obj));
- } else {
- __i915_gem_free_object(obj);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
- }
+ BUG_ON(!obj->ttm.created);
+ ttm_bo_put(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -898,16 +894,22 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- i915_ttm_backup_free(obj);
+ i915_gem_object_release_memory_region(obj);
- /* This releases all gem object bindings to the backend. */
- __i915_gem_free_object(obj);
+ if (obj->ttm.created) {
+ i915_ttm_backup_free(obj);
- i915_gem_object_release_memory_region(obj);
- mutex_destroy(&obj->ttm.get_io_page.lock);
+ /* This releases all gem object bindings to the backend. */
+ __i915_gem_free_object(obj);
- if (obj->ttm.created)
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ mutex_destroy(&obj->ttm.get_io_page.lock);
+
+ if (obj->ttm.created)
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ } else {
+ /* Caller will free object and GEM placements. */
+ dma_resv_fini(&obj->base._resv);
+ }
}
/**
@@ -934,26 +936,20 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
enum ttm_bo_type bo_type;
int ret;
+ /* Forcing the page size is kernel internal only */
+ GEM_BUG_ON(page_size && obj->mm.n_placements);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
- i915_gem_object_init_memory_region(obj, mem);
- i915_gem_object_make_unshrinkable(obj);
- INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
- mutex_init(&obj->ttm.get_io_page.lock);
+ obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
+ obj->flags = flags;
bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
ttm_bo_type_kernel;
-
- obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
-
- /* Forcing the page size is kernel internal only */
- GEM_BUG_ON(page_size && obj->mm.n_placements);
+ obj->mm.region = intel_memory_region_get(mem);
+ INIT_LIST_HEAD(&obj->mm.region_link);
/*
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
- * Similarly, in delayed_destroy, we can't call ttm_bo_put()
- * until successful initialization.
*/
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
bo_type, &i915_sys_placement,
@@ -962,7 +958,17 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
if (ret)
return i915_ttm_err_to_gem(ret);
+ /*
+ * The object is now present on TTM LRU lists.
+ * Keep the object locked until all initialization is done.
+ */
obj->ttm.created = true;
+ i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+ i915_gem_object_release_memory_region(obj);
+ i915_gem_object_init_memory_region(obj, mem);
+ i915_gem_object_make_unshrinkable(obj);
+ INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->ttm.get_io_page.lock);
i915_ttm_adjust_domains_after_move(obj);
i915_ttm_adjust_gem_after_move(obj);
i915_gem_object_unlock(obj);
--
2.31.1
More information about the Intel-gfx-trybot
mailing list