[PATCH 29/32] drm/i915: Use TTM's delayed destroy for all buffer objects
Maarten Lankhorst
maarten.lankhorst at linux.intel.com
Wed Oct 13 16:36:15 UTC 2021
The only reason we still have i915_vma.active is to keep the BO
alive. TTM already has a mechanism for that, so use it instead, by
creating a zombie TTM object that is initialized just well enough to
be destroyed.
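
In short, the free path becomes (condensed from the diff below;
backends that provide their own delayed_free hook are unchanged):

  static void _i915_gem_free_object(struct drm_i915_gem_object *obj)
  {
          struct drm_i915_private *i915 = to_i915(obj->base.dev);

          if (obj->ops->delayed_free)
                  /* Backend already has a delayed free; keep using it. */
                  obj->ops->delayed_free(obj);
          else
                  /* Hand the embedded, never ttm_bo_init()ed BO over to
                   * TTM; i915_ttm_bo_destroy() runs once the BO is idle.
                   */
                  ttm_bo_release_unmanaged(&i915->bdev,
                                           i915_gem_to_ttm(obj),
                                           i915_ttm_bo_destroy);
  }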
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_object.c | 28 +++++++++++----------
drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 8 ++++++
drivers/gpu/drm/ttm/ttm_bo.c | 29 ++++++++++++++++++++++
include/drm/ttm/ttm_bo_api.h | 18 ++++++++++++++
4 files changed, 70 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 3d6d7cc77b24..7f9a2c15686b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -269,6 +269,7 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
trace_i915_gem_object_destroy(obj);
GEM_BUG_ON(!list_empty(&obj->lut_list));
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);
@@ -289,6 +290,16 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
__i915_gem_object_fini(obj);
}
+static void _i915_gem_free_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (obj->ops->delayed_free)
+ obj->ops->delayed_free(obj);
+ else
+ ttm_bo_release_unmanaged(&i915->bdev, i915_gem_to_ttm(obj), i915_ttm_bo_destroy);
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
@@ -296,18 +307,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
llist_for_each_entry_safe(obj, on, freed, freed) {
might_sleep();
- if (obj->ops->delayed_free) {
- obj->ops->delayed_free(obj);
- continue;
- }
-
- i915_gem_object_lock(obj, NULL);
- __i915_gem_object_pages_fini(obj);
- i915_gem_object_unlock(obj);
- __i915_gem_free_object(obj);
-
- /* But keep the pointer alive for RCU-protected lookups */
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ _i915_gem_free_object(obj);
cond_resched();
}
}
@@ -363,7 +363,9 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
* crude but effective memory throttling.
*/
- if (llist_add(&obj->freed, &i915->mm.free_list))
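+ /* If we may sleep here, free the object immediately; otherwise
+ * punt it to the free worker.
+ */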
+ if (!in_atomic() && !irqs_disabled())
+ _i915_gem_free_object(obj);
+ else if (llist_add(&obj->freed, &i915->mm.free_list))
queue_work(i915->wq, &i915->mm.free_work);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 9069c3a3b1a2..4d4b5b125351 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -899,6 +899,14 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ if (obj->ops != &i915_gem_ttm_obj_ops) {
+ /* This releases all gem object bindings to the backend. */
+ __i915_gem_free_object(obj);
+
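+ /* But keep the pointer alive for RCU-protected lookups */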
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ return;
+ }
+
i915_gem_object_release_memory_region(obj);
mutex_destroy(&obj->ttm.get_io_page.lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3934ee225c78..2e07f800d7b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -470,6 +470,35 @@ static void ttm_bo_release(struct kref *kref)
bo->destroy(bo);
}
+void ttm_bo_release_unmanaged(struct ttm_device *bdev,
+ struct ttm_buffer_object *bo,
+ void (*destroy) (struct ttm_buffer_object *))
+{
+ int ret;
+
+ bo->bdev = bdev;
+ bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
+ atomic_inc(&ttm_glob.bo_count);
+ INIT_LIST_HEAD(&bo->lru);
+ INIT_LIST_HEAD(&bo->ddestroy);
+
+ WARN_ON(bo->deleted);
+
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort: if we fail to allocate memory for the
+ * fences, block for the BO to become idle.
+ */
+ dma_resv_wait_timeout(bo->base.resv, true, false,
+ 30 * HZ);
+ }
+
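+ /* ttm_bo_release() now sees the resv as already individualized and
+ * defers the final @destroy callback until the BO is idle.
+ */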
+ bo->deleted = true;
+
+ ttm_bo_release(&bo->kref);
+}
+EXPORT_SYMBOL(ttm_bo_release_unmanaged);
+
void ttm_bo_put(struct ttm_buffer_object *bo)
{
kref_put(&bo->kref, ttm_bo_release);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0551e2587f14..f140fb017581 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -292,6 +292,24 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
void ttm_bo_put(struct ttm_buffer_object *bo);
+/**
+ * ttm_bo_release_unmanaged
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: The buffer object.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * Destroy a ttm_bo that was not initialized with ttm_bo_init() and is
+ * otherwise completely unmanaged by TTM.
+ *
+ * This lets such buffer objects still use TTM's delayed destroy
+ * mechanism: the BO is released through ttm_bo_release(), which defers
+ * the final destroy callback until the BO is idle.
+ */
+void ttm_bo_release_unmanaged(struct ttm_device *bdev,
+ struct ttm_buffer_object *bo,
+ void (*destroy) (struct ttm_buffer_object *));
+
/**
* ttm_bo_move_to_lru_tail
*
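
For illustration only (not part of the patch): a minimal sketch of how
a driver could use the new helper for a BO that never went through
ttm_bo_init(). The my_object/my_destroy names are hypothetical, and
this assumes the embedded GEM object (and hence its dma_resv) was
initialized by the driver, as the i915 objects above are:

  /* Hypothetical driver object embedding a bare ttm_buffer_object. */
  struct my_object {
          struct ttm_buffer_object base;
  };

  static void my_destroy(struct ttm_buffer_object *bo)
  {
          struct my_object *obj = container_of(bo, struct my_object, base);

          kfree(obj);
  }

  static void my_object_free(struct ttm_device *bdev, struct my_object *obj)
  {
          /* TTM individualizes obj->base.base.resv and calls
           * my_destroy() only once all fences on it have signaled.
           */
          ttm_bo_release_unmanaged(bdev, &obj->base, my_destroy);
  }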
--
2.33.0