[PATCH] drm/ttm: Fix delayed memory free issue

Emily Deng <Emily.Deng at amd.com>
Wed Jul 10 09:29:31 UTC 2019


The Vulkan CTS allocation test cases create a series of BOs and then
free them. Because the tests run in the same VM and the per-VM BO
feature is enabled, all of those BOs share the same reservation
object. This makes freeing slow: every time a BO is freed, the
delayed-destroy path checks whether the shared reservation object has
signaled, and only then releases the BO. Meanwhile the test cases keep
creating BOs, so new fences keep being added to the shared reservation
object, and freeing falls ever further behind allocation, eventually
exhausting memory.
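
A simplified sketch of the existing delayed-destroy check (loosely
based on ttm_bo_cleanup_refs() in drivers/gpu/drm/ttm/ttm_bo.c, using
upstream reservation_object helper names; error handling of the wait
is omitted, so this is illustrative only):

	static int ttm_bo_cleanup_refs_sketch(struct ttm_buffer_object *bo,
					      bool interruptible,
					      bool no_wait_gpu)
	{
		if (!reservation_object_test_signaled_rcu(bo->resv, true)) {
			/* Fences on the shared resv have not signaled yet. */
			if (no_wait_gpu)
				return -EBUSY; /* BO stays on bdev->ddestroy */

			/* Otherwise block for up to 30 * HZ for the fences. */
			reservation_object_wait_timeout_rcu(bo->resv, true,
							    interruptible,
							    30 * HZ);
		}

		/* Only now is the BO actually torn down and freed. */
		ttm_bo_cleanup_memtype_use(bo);
		return 0;
	}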

Method:
When the shared reservation object signals, release all of the BOs on
the delayed-destroy list that use the same reservation object, not
just the one currently being processed.
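
In sketch form (the real change is the diff below; this hypothetical
helper only illustrates the loop that the patch open-codes inside
ttm_bo_cleanup_refs()):

	static void ttm_bo_release_same_resv(struct ttm_buffer_object *bo)
	{
		struct ttm_bo_global *glob = bo->bdev->glob;
		struct ttm_buffer_object *resv_bo, *resv_bo_next;

		spin_lock(&glob->lru_lock);
		list_for_each_entry_safe(resv_bo, resv_bo_next,
					 &bo->bdev->ddestroy, ddestroy) {
			if (resv_bo->resv != bo->resv)
				continue;

			/* Same resv, already known signaled: free it now. */
			ttm_bo_del_from_lru(resv_bo);
			list_del_init(&resv_bo->ddestroy);
			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(resv_bo);
			kref_put(&resv_bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
		spin_unlock(&glob->lru_lock);
	}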

Signed-off-by: Emily Deng <Emily.Deng at amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f9a3d4c..57ec59b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -543,6 +543,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 {
 	struct ttm_bo_global *glob = bo->bdev->glob;
 	struct reservation_object *resv;
+	struct ttm_buffer_object *resv_bo, *resv_bo_next;
 	int ret;
 
 	if (unlikely(list_empty(&bo->ddestroy)))
@@ -566,10 +567,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 							   interruptible,
 							   30 * HZ);
 
-		if (lret < 0)
+		if (lret < 0) {
+			kref_put(&bo->list_kref, ttm_bo_release_list);
 			return lret;
-		else if (lret == 0)
+		}
+		else if (lret == 0) {
+			kref_put(&bo->list_kref, ttm_bo_release_list);
 			return -EBUSY;
+		}
 
 		spin_lock(&glob->lru_lock);
 		if (unlock_resv && !kcl_reservation_object_trylock(bo->resv)) {
@@ -582,6 +587,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			 * here.
 			 */
 			spin_unlock(&glob->lru_lock);
+			kref_put(&bo->list_kref, ttm_bo_release_list);
 			return 0;
 		}
 		ret = 0;
@@ -591,15 +597,29 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		if (unlock_resv)
 			kcl_reservation_object_unlock(bo->resv);
 		spin_unlock(&glob->lru_lock);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
 		return ret;
 	}
 
 	ttm_bo_del_from_lru(bo);
 	list_del_init(&bo->ddestroy);
 	kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
 	spin_unlock(&glob->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
+	kref_put(&bo->list_kref, ttm_bo_release_list);
+
+	spin_lock(&glob->lru_lock);
+	list_for_each_entry_safe(resv_bo, resv_bo_next, &bo->bdev->ddestroy, ddestroy) {
+		if (resv_bo->resv == bo->resv) {
+			ttm_bo_del_from_lru(resv_bo);
+			list_del_init(&resv_bo->ddestroy);
+			spin_unlock(&glob->lru_lock);
+			ttm_bo_cleanup_memtype_use(resv_bo);
+			kref_put(&resv_bo->list_kref, ttm_bo_release_list);
+			spin_lock(&glob->lru_lock);
+		}
+	}
+	spin_unlock(&glob->lru_lock);
 
 	if (unlock_resv)
 		kcl_reservation_object_unlock(bo->resv);
@@ -639,9 +659,8 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 		} else {
 			spin_unlock(&glob->lru_lock);
+			kref_put(&bo->list_kref, ttm_bo_release_list);
 		}
-
-		kref_put(&bo->list_kref, ttm_bo_release_list);
 		spin_lock(&glob->lru_lock);
 	}
 	list_splice_tail(&removed, &bdev->ddestroy);
-- 
2.7.4


