[PATCH] drm/ttm: revert "drop the extra reservation for pipelined BO moves"

Christian König ckoenig.leichtzumerken at gmail.com
Thu Dec 6 09:57:11 UTC 2018


The patch being reverted caused trouble because of unhandled corner
cases during memory pressure.

The extra overhead of checking whether we have enough space isn't worth
the trouble, so just revert it.

This reverts commit 5786b66c9e3b7b18f3c24566e70cae450969cb14 and
commit 7d9a7a3bddb537bb05d297423df539bffa1aa501.

Signed-off-by: Christian König <christian.koenig at amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ffd68b039d23..d87935bf8e30 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -866,11 +866,12 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
 /**
  * Add the last move fence to the BO and reserve a new shared slot.
  */
-static void ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
-				  struct ttm_mem_type_manager *man,
-				  struct ttm_mem_reg *mem)
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+				 struct ttm_mem_type_manager *man,
+				 struct ttm_mem_reg *mem)
 {
 	struct dma_fence *fence;
+	int ret;
 
 	spin_lock(&man->move_lock);
 	fence = dma_fence_get(man->move);
@@ -878,9 +879,16 @@ static void ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 
 	if (fence) {
 		reservation_object_add_shared_fence(bo->resv, fence);
+
+		ret = reservation_object_reserve_shared(bo->resv, 1);
+		if (unlikely(ret))
+			return ret;
+
 		dma_fence_put(bo->moving);
 		bo->moving = fence;
 	}
+
+	return 0;
 }
 
 /**
@@ -908,8 +916,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 	} while (1);
 	mem->mem_type = mem_type;
-	ttm_bo_add_move_fence(bo, man, mem);
-	return 0;
+	return ttm_bo_add_move_fence(bo, man, mem);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -978,6 +985,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool has_erestartsys = false;
 	int i, ret;
 
+	ret = reservation_object_reserve_shared(bo->resv, 1);
+	if (unlikely(ret))
+		return ret;
+
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
 		const struct ttm_place *place = &placement->placement[i];
@@ -1013,7 +1024,11 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			return ret;
 
 		if (mem->mm_node) {
-			ttm_bo_add_move_fence(bo, man, mem);
+			ret = ttm_bo_add_move_fence(bo, man, mem);
+			if (unlikely(ret)) {
+				(*man->func->put_node)(man, mem);
+				return ret;
+			}
 			break;
 		}
 	}
@@ -1264,9 +1279,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		WARN_ON(!locked);
 	}
 
-	if (likely(!ret))
-		ret = reservation_object_reserve_shared(bo->resv, 1);
-
 	if (likely(!ret))
 		ret = ttm_bo_validate(bo, placement, ctx);
 
-- 
2.14.1
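
For context, a minimal sketch of the reservation pattern this revert
restores, assuming the reservation_object API as used in the diff above.
The names example_attach_move_fence() and example_caller() are
hypothetical; only reservation_object_reserve_shared() and
reservation_object_add_shared_fence() are taken from the patch:

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/*
 * Sketch only: attach a move fence into the shared slot the caller
 * already reserved, then immediately reserve a fresh slot for the next
 * fence.  Reserving can fail (e.g. -ENOMEM under memory pressure), so
 * the error is propagated instead of checking for space beforehand.
 * The reservation object must be locked by the caller.
 */
static int example_attach_move_fence(struct reservation_object *resv,
				     struct dma_fence *fence)
{
	reservation_object_add_shared_fence(resv, fence);

	/* Re-establish the invariant: keep one free shared slot. */
	return reservation_object_reserve_shared(resv, 1);
}

static int example_caller(struct reservation_object *resv,
			  struct dma_fence *move)
{
	int ret;

	/* Reserve the initial shared slot before looking for space. */
	ret = reservation_object_reserve_shared(resv, 1);
	if (unlikely(ret))
		return ret;

	if (move) {
		ret = example_attach_move_fence(resv, move);
		if (unlikely(ret))
			return ret;
	}

	/* ... proceed with the pipelined move ... */
	return 0;
}

The point the revert restores is that reserving a shared slot is the
step that can fail, so the error is handled where the fence is added
(ttm_bo_mem_space / ttm_bo_add_move_fence) rather than once at BO
initialization time in ttm_bo_init_reserved().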