[PATCH] drm/amdkfd: Separate dma unmap and free of dma address array operations
Felix Kuehling
felix.kuehling at amd.com
Tue Sep 19 21:22:13 UTC 2023
On 2023-09-15 18:13, Xiaogang.Chen wrote:
> From: Xiaogang Chen <xiaogang.chen at amd.com>
>
> We do not need to free the dma address array of a svm_range each time we
> dma unmap pages in the svm_range, as we can reuse the same array. Only
> free it when the svm_range itself is freed. Separate these two operations
> and use them accordingly.
>
> Signed-off-by: Xiaogang.Chen <Xiaogang.Chen at amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
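
To summarize the resulting API for anyone skimming the thread, this is
roughly what the split looks like after the patch (a simplified sketch,
not the exact kernel source):

    /* Low-level helper: unmap npages DMA addresses on one device,
     * starting at offset. The dma_addr array itself stays allocated.
     */
    void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
                                 unsigned long offset, unsigned long npages);

    /* Unmap the DMA addresses of a whole prange on every GPU that has a
     * dma_addr array allocated. Does not free the arrays, so the range
     * can be mapped again without reallocating them.
     */
    void svm_range_dma_unmap(struct svm_range *prange);

The arrays themselves are now only kvfree'd in svm_range_free(), so they
live exactly as long as the prange.
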
> ---
> drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 6 +++---
> drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 23 ++++++++++++++++-------
> drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 ++--
> 3 files changed, 21 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index 192b0d106413..6c25dab051d5 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -460,7 +460,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
> start >> PAGE_SHIFT, end >> PAGE_SHIFT,
> 0, node->id, trigger);
>
> - svm_range_dma_unmap(adev->dev, scratch, 0, npages);
> + svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
>
> out_free:
> kvfree(buf);
> @@ -544,7 +544,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>
> if (cpages) {
> prange->actual_loc = best_loc;
> - svm_range_free_dma_mappings(prange, true);
> + svm_range_dma_unmap(prange);
> } else {
> svm_range_vram_node_free(prange);
> }
> @@ -745,7 +745,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
> start >> PAGE_SHIFT, end >> PAGE_SHIFT,
> node->id, 0, trigger);
>
> - svm_range_dma_unmap(adev->dev, scratch, 0, npages);
> + svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
>
> out_free:
> kvfree(buf);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 5d7ba7dbf6ce..bed0f8bf83c7 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -229,7 +229,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
> return r;
> }
>
> -void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
> +void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
> unsigned long offset, unsigned long npages)
> {
> enum dma_data_direction dir = DMA_BIDIRECTIONAL;
> @@ -247,7 +247,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
> }
> }
>
> -void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
> +void svm_range_dma_unmap(struct svm_range *prange)
> {
> struct kfd_process_device *pdd;
> dma_addr_t *dma_addr;
> @@ -268,10 +268,8 @@ void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
> continue;
> }
> dev = &pdd->dev->adev->pdev->dev;
> - if (unmap_dma)
> - svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
> - kvfree(dma_addr);
> - prange->dma_addr[gpuidx] = NULL;
> +
> + svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
> }
> }
>
> @@ -279,18 +277,29 @@ static void svm_range_free(struct svm_range *prange, bool do_unmap)
> {
> uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
> struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
> + uint32_t gpuidx;
>
> pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
> prange->start, prange->last);
>
> svm_range_vram_node_free(prange);
> - svm_range_free_dma_mappings(prange, do_unmap);
> + if (do_unmap)
> + svm_range_dma_unmap(prange);
>
> if (do_unmap && !p->xnack_enabled) {
> pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
> amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
> KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
> }
> +
> + /* free dma_addr array for each gpu */
> + for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
> + if (prange->dma_addr[gpuidx]) {
> + kvfree(prange->dma_addr[gpuidx]);
> + prange->dma_addr[gpuidx] = NULL;
> + }
> + }
> +
> mutex_destroy(&prange->lock);
> mutex_destroy(&prange->migrate_mutex);
> kfree(prange);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> index 9e668eeefb32..78bfb83cd0c0 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> @@ -181,9 +181,9 @@ void svm_range_add_list_work(struct svm_range_list *svms,
> struct svm_range *prange, struct mm_struct *mm,
> enum svm_work_list_ops op);
> void schedule_deferred_list_work(struct svm_range_list *svms);
> -void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
> +void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
> unsigned long offset, unsigned long npages);
> -void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma);
> +void svm_range_dma_unmap(struct svm_range *prange);
> int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
> uint64_t *svm_priv_data_size);
> int kfd_criu_checkpoint_svm(struct kfd_process *p,
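
The upshot for callers, roughly (a simplified sketch based on the hunks
above; note svm_range_free() is static to kfd_svm.c):

    /* On migration, unmap the pages but keep the per-GPU dma_addr
     * arrays for reuse by the next mapping:
     */
    svm_range_dma_unmap(prange);

    /* Only when the range itself goes away are the arrays kvfree'd: */
    svm_range_free(prange, true /* do_unmap */);
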