[PATCH 1/2] drm/ttm: cleanup ttm_mem_type_manager_func.get_node interface v2

Christian König <ckoenig.leichtzumerken@gmail.com>
Mon Jun 29 12:21:31 UTC 2020


Instead of signaling failure by setting the mm_node pointer to
NULL, do so by returning -ENOSPC.

v2: add memset() to make sure that mem is always initialized.

Signed-off-by: Christian König <christian.koenig at amd.com>
---
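Note for reviewers: a minimal sketch of the get_node contract after this
change, for reference only. The example_mgr_* names and helpers below are
hypothetical and not part of any driver; they merely illustrate that a
backend now reports address-space exhaustion through the return value
instead of a NULL mm_node, while hard errors are returned as before.

/* Hypothetical backend illustrating the new get_node contract; the
 * example_mgr_* helpers do not exist in any driver. */
static int example_mgr_get_node(struct ttm_mem_type_manager *man,
				struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_mem_reg *mem)
{
	struct example_mgr *mgr = man->priv;

	/* Old convention: mem->mm_node = NULL; return 0; */
	if (!example_mgr_has_space(mgr, mem->num_pages))
		return -ENOSPC;

	mem->mm_node = example_mgr_alloc(mgr, mem->num_pages, &mem->start);
	if (!mem->mm_node)
		return -ENOMEM;	/* real errors are still returned unchanged */

	return 0;
}
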
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c   |  4 +---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_ttm.c         |  8 --------
 drivers/gpu/drm/ttm/ttm_bo.c                  | 12 ++++++------
 drivers/gpu/drm/ttm/ttm_bo_manager.c          |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |  4 +---
 6 files changed, 11 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 627104401e84..2baa80224fa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -229,7 +229,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
 	    atomic64_read(&mgr->available) < mem->num_pages) {
 		spin_unlock(&mgr->lock);
-		return 0;
+		return -ENOSPC;
 	}
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
@@ -249,8 +249,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
 		if (unlikely(r)) {
 			kfree(node);
-			mem->mm_node = NULL;
-			r = 0;
 			goto err_out;
 		}
 	} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 128a667ed8fa..e8d1dd564006 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -336,8 +336,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		atomic64_sub(mem_bytes, &mgr->usage);
-		mem->mm_node = NULL;
-		return 0;
+		return -ENOSPC;
 	}
 
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -417,7 +416,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
 
 	kvfree(nodes);
-	return r == -ENOSPC ? 0 : r;
+	return r;
 }
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7ca0a2498532..e89ea052cf71 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -75,10 +75,6 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}
 
@@ -139,10 +135,6 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f78cfc76ad78..06b8bc0d8f23 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -909,10 +909,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = (*man->func->get_node)(man, bo, place, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		if (mem->mm_node)
+		if (likely(!ret))
 			break;
+		if (unlikely(ret != -ENOSPC))
+			return ret;
 		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
 					  ticket);
 		if (unlikely(ret != 0))
@@ -1056,12 +1056,11 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		man = &bdev->man[mem->mem_type];
 		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (ret == -ENOSPC)
+			continue;
 		if (unlikely(ret))
 			goto error;
 
-		if (!mem->mm_node)
-			continue;
-
 		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
 			(*man->func->put_node)(man, mem);
@@ -1121,6 +1120,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 	dma_resv_assert_held(bo->base.resv);
 
+	memset(&mem, 0, sizeof(mem));
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 18d3debcc949..facd3049c3aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -86,7 +86,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		mem->start = node->start;
 	}
 
-	return 0;
+	return ret;
 }
 
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 7da752ca1c34..4a76fc7114ad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -53,8 +53,6 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 		(struct vmwgfx_gmrid_man *)man->priv;
 	int id;
 
-	mem->mm_node = NULL;
-
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return (id != -ENOMEM ? 0 : id);
@@ -78,7 +76,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 	gman->used_gmr_pages -= bo->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
-	return 0;
+	return -ENOSPC;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
-- 
2.17.1


