[PATCH v3 2/3] drm/amdkfd: handle svm partial migration cpages 0
Felix Kuehling
felix.kuehling at amd.com
Tue Oct 12 22:12:12 UTC 2021
On 2021-10-12 at 5:00 p.m., Philip Yang wrote:
> migrate_vma_setup may return cpages == 0, meaning no pages can be migrated.
> Treat this as an error case and skip the rest of the vma migration steps.
>
> Change svm_migrate_vma_to_vram and svm_migrate_vma_to_ram to return the
> number of pages migrated successfully or an error code. The caller adds up
> all the successfully migrated pages and updates prange->actual_loc only if
> the total number of migrated pages is not 0.
>
> This also removes the warning message "VRAM BO missing during
> validation" if migration cpages is 0.
>
> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
> ---
> drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 90 ++++++++++++++----------
> 1 file changed, 51 insertions(+), 39 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index b05c0579d0b9..d37f20b17586 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -376,7 +376,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
> return r;
> }
>
> -static int
> +static long
> svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
> struct vm_area_struct *vma, uint64_t start,
> uint64_t end)
> @@ -413,32 +413,37 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
> prange->start, prange->last);
> goto out_free;
> }
> - if (migrate.cpages != npages) {
> - pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
> - migrate.cpages,
> - npages);
> - }
>
> - if (migrate.cpages) {
> - r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
> - scratch);
> - migrate_vma_pages(&migrate);
> - svm_migrate_copy_done(adev, mfence);
> - migrate_vma_finalize(&migrate);
> + if (migrate.cpages != npages)
> + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
> + migrate.cpages, npages);
> + else
> + pr_debug("0x%lx pages migrated\n", migrate.cpages);
> +
> + if (!migrate.cpages) {
> + pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
> + prange->start, prange->last);
> + goto out_free;
> }
>
> + r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
> + migrate_vma_pages(&migrate);
> + svm_migrate_copy_done(adev, mfence);
> + migrate_vma_finalize(&migrate);
> +
> svm_range_dma_unmap(adev->dev, scratch, 0, npages);
> svm_range_free_dma_mappings(prange);
>
> out_free:
> kvfree(buf);
> out:
> - if (!r) {
> + if (!r && migrate.cpages) {
> pdd = svm_range_get_pdd_by_adev(prange, adev);
> if (pdd)
> WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
> - }
>
> + return migrate.cpages;
> + }
> return r;
> }
>
> @@ -460,7 +465,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
> unsigned long addr, start, end;
> struct vm_area_struct *vma;
> struct amdgpu_device *adev;
> - int r = 0;
> + unsigned long cpages = 0;
> + long r;
>
> if (prange->actual_loc == best_loc) {
> pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
> @@ -492,17 +498,16 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>
> next = min(vma->vm_end, end);
> r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
> - if (r) {
> - pr_debug("failed to migrate\n");
> - break;
> - }
> + if (r > 0)
> + cpages += r;
I think you still want to break out of the loop here if r < 0,
potentially with a debug message.
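
Something like this, as an untested sketch that reuses the variable names
from the hunk above (the exact debug message is just an example):

	next = min(vma->vm_end, end);
	r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
	if (r < 0) {
		/* stop on error instead of continuing with the next vma */
		pr_debug("failed %ld to migrate\n", r);
		break;
	}
	cpages += r;	/* r >= 0 is the page count for this vma */
	addr = next;
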
> addr = next;
> }
>
> - if (!r)
> + if (cpages) {
> prange->actual_loc = best_loc;
> -
> - return r;
> + return 0;
> + }
> + return -ENOMEM;
> }
>
> static void svm_migrate_page_free(struct page *page)
> @@ -603,7 +608,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
> return r;
> }
>
> -static int
> +static long
> svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
> struct vm_area_struct *vma, uint64_t start, uint64_t end)
> {
> @@ -640,29 +645,35 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
> goto out_free;
> }
>
> - pr_debug("cpages %ld\n", migrate.cpages);
> + if (migrate.cpages != npages)
> + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
> + migrate.cpages, npages);
> + else
> + pr_debug("0x%lx pages migrated\n", migrate.cpages);
>
> - if (migrate.cpages) {
> - r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
> - scratch, npages);
> - migrate_vma_pages(&migrate);
> - svm_migrate_copy_done(adev, mfence);
> - migrate_vma_finalize(&migrate);
> - } else {
> + if (!migrate.cpages) {
> pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
> prange->start, prange->last);
> + goto out_free;
> }
>
> + r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
> + scratch, npages);
> + migrate_vma_pages(&migrate);
> + svm_migrate_copy_done(adev, mfence);
> + migrate_vma_finalize(&migrate);
> svm_range_dma_unmap(adev->dev, scratch, 0, npages);
>
> out_free:
> kvfree(buf);
> out:
> - if (!r) {
> + if (!r && migrate.cpages) {
> pdd = svm_range_get_pdd_by_adev(prange, adev);
> if (pdd)
> WRITE_ONCE(pdd->page_out,
> pdd->page_out + migrate.cpages);
> +
> + return migrate.cpages;
> }
> return r;
> }
> @@ -684,7 +695,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
> unsigned long addr;
> unsigned long start;
> unsigned long end;
> - int r = 0;
> + unsigned long cpages = 0;
> + long r;
>
> if (!prange->actual_loc) {
> pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
> @@ -715,18 +727,18 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
>
> next = min(vma->vm_end, end);
> r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
> - if (r) {
> - pr_debug("failed %d to migrate\n", r);
> - break;
> - }
> + if (r > 0)
> + cpages += r;
Same as above.
Regards,
Felix
> addr = next;
> }
>
> - if (!r) {
> + if (cpages) {
> svm_range_vram_node_free(prange);
> prange->actual_loc = 0;
> +
> + return 0;
> }
> - return r;
> + return -ENOMEM;
> }
>
> /**