[PATCH 1/2] drm/amdkfd: Save pdd to svm_bo to replace node
Philip Yang
Philip.Yang at amd.com
Fri Oct 11 15:00:05 UTC 2024
The KFD process device data (pdd) will be used for VRAM usage accounting. Save
the pdd in svm_bo to avoid looking up the pdd on every accounting operation;
the KFD node is available as pdd->dev.
svm_bo->pdd is always valid because KFD process release frees all svm_bo
objects first, then destroys the process pdds.
Signed-off-by: Philip Yang <Philip.Yang at amd.com>
---
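Note (illustration only, not part of this series): a minimal sketch, in the
context of kfd_svm.c, of how a follow-up VRAM accounting helper could use the
pdd saved in svm_bo. The helper name svm_range_bo_account_vram() and the
vram_usage field are assumptions made for the example:

static void svm_range_bo_account_vram(struct svm_range_bo *svm_bo, u64 size)
{
	/* Illustrative sketch only, not code from this patch. */
	struct kfd_process_device *pdd = svm_bo->pdd;

	/*
	 * pdd stays valid for the whole svm_bo lifetime: process release
	 * frees all svm_bo objects before the process pdds are destroyed.
	 */
	atomic64_add(size, &pdd->vram_usage);

	/* The KFD node and its adev remain reachable through pdd->dev. */
	pr_debug("node %u vram usage +%llu\n", pdd->dev->id,
		 (unsigned long long)size);
}

With the pdd cached in svm_bo, such accounting call sites would not need to
call svm_range_get_pdd_by_node() on every allocation and free.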
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 27 +++++++++++++++++----------
drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2 +-
2 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 857ec6f23bba..d40f6fb803df 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -180,7 +180,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 
 		page = hmm_pfn_to_page(hmm_pfns[i]);
 		if (is_zone_device_page(page)) {
-			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
+			struct amdgpu_device *bo_adev = prange->svm_bo->pdd->dev->adev;
 
 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
 				   bo_adev->vm_manager.vram_base_offset -
@@ -457,11 +457,11 @@ svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
 	}
 	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
 		/*
-		 * Migrate from GPU to GPU, remove range from source svm_bo->node
+		 * Migrate from GPU to GPU, remove range from source svm_bo node
 		 * range list, and return false to allocate svm_bo from destination
 		 * node.
 		 */
-		if (prange->svm_bo->node != node) {
+		if (prange->svm_bo->pdd->dev != node) {
 			mutex_unlock(&prange->lock);
 
 			spin_lock(&prange->svm_bo->list_lock);
@@ -532,6 +532,7 @@ int
 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 			bool clear)
 {
+	struct kfd_process_device *pdd;
 	struct amdgpu_bo_param bp;
 	struct svm_range_bo *svm_bo;
 	struct amdgpu_bo_user *ubo;
@@ -548,17 +549,22 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 		return 0;
 
 	svm_bo = svm_range_bo_new();
-	if (!svm_bo) {
-		pr_debug("failed to alloc svm bo\n");
+	if (!svm_bo)
 		return -ENOMEM;
+
+	pdd = svm_range_get_pdd_by_node(prange, node);
+	if (!pdd) {
+		r = -ESRCH;
+		goto out_free;
 	}
+	svm_bo->pdd = pdd;
+
 	mm = get_task_mm(p->lead_thread);
 	if (!mm) {
 		pr_debug("failed to get mm\n");
-		kfree(svm_bo);
-		return -ESRCH;
+		r = -ESRCH;
+		goto out_free;
 	}
-	svm_bo->node = node;
 	svm_bo->eviction_fence =
 		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
 					   mm,
@@ -629,6 +635,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 	amdgpu_bo_unref(&bo);
 create_bo_failed:
 	dma_fence_put(&svm_bo->eviction_fence->base);
+out_free:
 	kfree(svm_bo);
 	prange->ttm_res = NULL;
 
@@ -1176,7 +1183,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
 	unsigned int mtype_local;
 
 	if (domain == SVM_RANGE_VRAM_DOMAIN)
-		bo_node = prange->svm_bo->node;
+		bo_node = prange->svm_bo->pdd->dev;
 
 	switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
 	case IP_VERSION(9, 4, 1):
@@ -1440,7 +1447,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 	int r = 0;
 
 	if (prange->svm_bo && prange->ttm_res)
-		bo_adev = prange->svm_bo->node->adev;
+		bo_adev = prange->svm_bo->pdd->dev->adev;
 
 	p = container_of(prange->svms, struct kfd_process, svms);
 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index bddd24f04669..fad2d6d2223a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -48,7 +48,7 @@ struct svm_range_bo {
 	struct work_struct		eviction_work;
 	uint32_t			evicting;
 	struct work_struct		release_work;
-	struct kfd_node			*node;
+	struct kfd_process_device	*pdd;
 };
 
 enum svm_work_list_ops {
--
2.43.2