[PATCH 27/31] drm/i915: Use TTM's delayed destroy for all buffer objects

Maarten Lankhorst maarten.lankhorst at linux.intel.com
Wed Sep 22 14:48:57 UTC 2021
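
Route the freeing of GEM objects that are not backed by TTM through TTM's
release path as well. A new helper, ttm_bo_release_unmanaged(),
individualizes the reservation object (falling back to waiting for the BO
to become idle if that allocation fails) and then runs the regular
ttm_bo_release() logic for buffer objects that were never initialized with
ttm_bo_init(), so they get the same delayed, fence-aware destruction as
TTM-managed objects.

On the i915 side, non-TTM objects are now destroyed through this helper,
with i915_nonttm_bo_destroy() releasing the backend bindings and keeping
the pointer alive for RCU-protected lookups. i915_gem_free_object() can
also free objects directly when the calling context allows sleeping, and
only defers to the free_list worker from atomic context.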


Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c | 32 ++++++++++++++++------
 drivers/gpu/drm/ttm/ttm_bo.c               | 27 ++++++++++++++++++
 include/drm/ttm/ttm_bo_api.h               | 18 ++++++++++++
 3 files changed, 69 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index dc0d2da297a0..6b8db0ea226b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -256,6 +256,26 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
 		i915_vm_resv_put(obj->shares_resv_from);
 }
 
+static void i915_nonttm_bo_destroy(struct ttm_buffer_object *bo)
+{
+	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+
+	/* This releases all gem object bindings to the backend. */
+	__i915_gem_free_object(obj);
+
+	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
+
+static void _i915_gem_free_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+	if (obj->ops->delayed_free)
+		obj->ops->delayed_free(obj);
+	else
+		ttm_bo_release_unmanaged(&i915->bdev, i915_gem_to_ttm(obj), i915_nonttm_bo_destroy);
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
@@ -263,14 +283,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 	llist_for_each_entry_safe(obj, on, freed, freed) {
 		might_sleep();
-		if (obj->ops->delayed_free) {
-			obj->ops->delayed_free(obj);
-			continue;
-		}
-		__i915_gem_free_object(obj);
 
-		/* But keep the pointer alive for RCU-protected lookups */
-		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+		_i915_gem_free_object(obj);
 		cond_resched();
 	}
 }
@@ -326,7 +340,9 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * crude but effective memory throttling.
 	 */
 
-	if (llist_add(&obj->freed, &i915->mm.free_list))
+	if (!in_atomic() && !irqs_disabled())
+		_i915_gem_free_object(obj);
+	else if (llist_add(&obj->freed, &i915->mm.free_list))
 		queue_work(i915->wq, &i915->mm.free_work);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3b22c0013dbf..04a6738d694f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -478,6 +478,33 @@ static void ttm_bo_release(struct kref *kref)
 	bo->destroy(bo);
 }
 
+void ttm_bo_release_unmanaged(struct ttm_device *bdev,
+			      struct ttm_buffer_object *bo,
+			      void (*destroy) (struct ttm_buffer_object *))
+{
+	int ret;
+
+	bo->bdev = bdev;
+	bo->destroy = destroy;
+	atomic_inc(&ttm_glob.bo_count);
+
+	WARN_ON(bo->deleted);
+
+	ret = ttm_bo_individualize_resv(bo);
+	if (ret) {
+		/* Last resort: if we fail to allocate memory for the
+		 * fences, block for the BO to become idle.
+		 */
+		dma_resv_wait_timeout(bo->base.resv, true, false,
+				      30 * HZ);
+	}
+
+	bo->deleted = true;
+
+	ttm_bo_release(&bo->kref);
+}
+EXPORT_SYMBOL(ttm_bo_release_unmanaged);
+
 void ttm_bo_put(struct ttm_buffer_object *bo)
 {
 	kref_put(&bo->kref, ttm_bo_release);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 76d7c33884da..0dd192be9f4c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -292,6 +292,24 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 void ttm_bo_put(struct ttm_buffer_object *bo);
 
+/**
+ * ttm_bo_release_unmanaged
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: The buffer object.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * Destroy a ttm_bo that was not initialized with ttm_bo_init() and is
+ * otherwise completely unmanaged by TTM.
+ *
+ * This allows such buffer objects to still use TTM's delayed destroy
+ * mechanism: the reservation object is individualized and destruction is
+ * deferred until the BO is idle.
+ */
+void ttm_bo_release_unmanaged(struct ttm_device *bdev,
+			      struct ttm_buffer_object *bo,
+			      void (*destroy) (struct ttm_buffer_object *));
+
 /**
  * ttm_bo_move_to_lru_tail
  *
-- 
2.33.0
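
For reference, a minimal driver-side sketch of the usage pattern the i915
hunks above follow, assuming a GEM object that embeds a struct
ttm_buffer_object but is never passed to ttm_bo_init(); all mydrv_* names
are illustrative and not part of this patch:

#include <linux/slab.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_bo_api.h>

struct mydrv_device {
	struct ttm_device bdev;		/* registered elsewhere with ttm_device_init() */
};

struct mydrv_gem_object {
	struct ttm_buffer_object base;	/* embedded, never passed to ttm_bo_init() */
	struct mydrv_device *dev;
};

/* Called by TTM once the BO is idle; release driver state and free. */
static void mydrv_bo_destroy(struct ttm_buffer_object *bo)
{
	struct mydrv_gem_object *obj =
		container_of(bo, struct mydrv_gem_object, base);

	kfree(obj);
}

static void mydrv_gem_free(struct mydrv_gem_object *obj)
{
	/*
	 * Individualize the reservation object and let TTM defer
	 * mydrv_bo_destroy() until outstanding fences have signaled,
	 * mirroring i915_nonttm_bo_destroy() above.
	 */
	ttm_bo_release_unmanaged(&obj->dev->bdev, &obj->base, mydrv_bo_destroy);
}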


