[PATCH v2 2/3] drm/amdkfd: handle svm partial migration cpages 0
Philip Yang
Philip.Yang at amd.com
Tue Oct 12 13:55:26 UTC 2021
migrate_vma_setup may return with cpages 0, meaning no pages can be
migrated. Treat this as an error case and skip the rest of the vma
migration steps.
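For illustration, a minimal sketch of the early-exit pattern the diff
below introduces (identifiers taken from kfd_migrate.c; the surrounding
setup and error handling are elided):

	r = migrate_vma_setup(&migrate);
	if (r)
		goto out_free;
	/* cpages == 0: no page was collected for migration, so bail out
	 * instead of running the copy/finalize steps on an empty set.
	 */
	if (!migrate.cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}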
Change svm_migrate_vma_to_vram and svm_migrate_vma_to_ram to return the
number of pages migrated successfully. The caller adds up the pages
migrated by each call and updates prange->actual_loc only if the total
number of migrated pages is not 0.
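Similarly, a minimal sketch of the resulting caller pattern, modeled on
the svm_migrate_ram_to_vram hunk below (mmap locking elided):

	unsigned long cpages = 0;

	for (addr = start; addr < end; addr = next) {
		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		/* each per-vma call now returns the pages it migrated */
		cpages += svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
	}

	/* claim the new location only if at least one page moved */
	if (cpages) {
		prange->actual_loc = best_loc;
		return 0;
	}
	return -ENOMEM;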
This also removes the "VRAM BO missing during validation" warning
message when the migration cpages is 0.
Signed-off-by: Philip Yang <Philip.Yang at amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 92 +++++++++++++-----------
1 file changed, 49 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b05c0579d0b9..dd0fd52d0158 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -376,7 +376,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
return r;
}
-static int
+static unsigned long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end)
@@ -413,33 +413,38 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
prange->start, prange->last);
goto out_free;
}
- if (migrate.cpages != npages) {
- pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
- migrate.cpages,
- npages);
- }
- if (migrate.cpages) {
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
- scratch);
- migrate_vma_pages(&migrate);
- svm_migrate_copy_done(adev, mfence);
- migrate_vma_finalize(&migrate);
+ if (migrate.cpages != npages)
+ pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ migrate.cpages, npages);
+ else
+ pr_debug("0x%lx pages migrated\n", migrate.cpages);
+
+ if (!migrate.cpages) {
+ pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
+ prange->start, prange->last);
+ goto out_free;
}
+ r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+ migrate_vma_pages(&migrate);
+ svm_migrate_copy_done(adev, mfence);
+ migrate_vma_finalize(&migrate);
+
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
svm_range_free_dma_mappings(prange);
out_free:
kvfree(buf);
out:
- if (!r) {
+ if (!r && migrate.cpages) {
pdd = svm_range_get_pdd_by_adev(prange, adev);
if (pdd)
WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
- }
- return r;
+ return migrate.cpages;
+ }
+ return 0;
}
/**
@@ -460,7 +465,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
- int r = 0;
+ unsigned long cpages = 0;
if (prange->actual_loc == best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
@@ -491,18 +496,15 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
- if (r) {
- pr_debug("failed to migrate\n");
- break;
- }
+ cpages += svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
addr = next;
}
- if (!r)
+ if (cpages) {
prange->actual_loc = best_loc;
-
- return r;
+ return 0;
+ }
+ return -ENOMEM;
}
static void svm_migrate_page_free(struct page *page)
@@ -603,7 +605,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
return r;
}
-static int
+static unsigned long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
@@ -640,31 +642,37 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
goto out_free;
}
- pr_debug("cpages %ld\n", migrate.cpages);
+ if (migrate.cpages != npages)
+ pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ migrate.cpages, npages);
+ else
+ pr_debug("0x%lx pages migrated\n", migrate.cpages);
- if (migrate.cpages) {
- r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
- scratch, npages);
- migrate_vma_pages(&migrate);
- svm_migrate_copy_done(adev, mfence);
- migrate_vma_finalize(&migrate);
- } else {
+ if (!migrate.cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
+ goto out_free;
}
+ r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+ scratch, npages);
+ migrate_vma_pages(&migrate);
+ svm_migrate_copy_done(adev, mfence);
+ migrate_vma_finalize(&migrate);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
out:
- if (!r) {
+ if (!r && migrate.cpages) {
pdd = svm_range_get_pdd_by_adev(prange, adev);
if (pdd)
WRITE_ONCE(pdd->page_out,
pdd->page_out + migrate.cpages);
+
+ return migrate.cpages;
}
- return r;
+ return 0;
}
/**
@@ -684,7 +692,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
unsigned long addr;
unsigned long start;
unsigned long end;
- int r = 0;
+ unsigned long cpages = 0;
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
@@ -714,19 +722,17 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
- if (r) {
- pr_debug("failed %d to migrate\n", r);
- break;
- }
+ cpages += svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
addr = next;
}
- if (!r) {
+ if (cpages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
+
+ return 0;
}
- return r;
+ return -ENOMEM;
}
/**
--
2.17.1