[PATCH v2] drm/amdkfd: Correct the position of reserve and unreserve memory
Emily Deng
Emily.Deng at amd.com
Fri Feb 21 01:35:08 UTC 2025
Call amdgpu_amdkfd_reserve_mem_limit in svm_range_vram_node_new when
creating a new SVM BO. Call amdgpu_amdkfd_unreserve_mem_limit
in svm_range_bo_release when the SVM BO is deleted.
v2:
Refine the error handling path in svm_range_vram_node_new.
Signed-off-by: Emily Deng <Emily.Deng at amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 13 -------------
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 18 ++++++++++++++++++
2 files changed, 18 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index edff564d6a3c..724baa2e60bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -512,15 +512,6 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
- r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
- prange->npages * PAGE_SIZE,
- KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
- node->xcp ? node->xcp->id : 0);
- if (r) {
- dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
- return -ENOSPC;
- }
-
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
@@ -558,10 +549,6 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
}
out:
- amdgpu_amdkfd_unreserve_mem_limit(node->adev,
- prange->npages * PAGE_SIZE,
- KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
- node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd3e20d981e0..529c295fde06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -430,6 +430,10 @@ static void svm_range_bo_release(struct kref *kref)
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
dma_fence_put(&svm_bo->eviction_fence->base);
+ amdgpu_amdkfd_unreserve_mem_limit(svm_bo->node->adev,
+ svm_bo->bo->tbo.base.size,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ svm_bo->node->xcp ? svm_bo->node->xcp->id : 0);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
}
@@ -586,6 +590,15 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
mm,
svm_bo);
mmput(mm);
+ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+ if (r) {
+ pr_debug("failed to reserve VRAM, r: %ld\n", r);
+ r = -ENOSPC;
+ goto reserve_mem_failed;
+ }
INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
svm_bo->evicting = 0;
memset(&bp, 0, sizeof(bp));
@@ -654,6 +667,11 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
reserve_bo_failed:
amdgpu_bo_unref(&bo);
create_bo_failed:
+ amdgpu_amdkfd_unreserve_mem_limit(svm_bo->node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+reserve_mem_failed:
dma_fence_put(&svm_bo->eviction_fence->base);
kfree(svm_bo);
prange->ttm_res = NULL;
--
2.34.1
More information about the amd-gfx
mailing list