[PATCH v3] drm/amdkfd: Move dma unmapping after TLB flush
Felix Kuehling
felix.kuehling at amd.com
Wed Sep 13 00:32:51 UTC 2023
On 2023-09-12 11:29, Philip Yang wrote:
> Otherwise the GPU may access the stale mapping and generate an IOMMU
> IO_PAGE_FAULT.
>
> Move this inside p->mutex to prevent a race condition between multiple
> threads mapping and unmapping concurrently.
>
> After kfd_mem_dmaunmap_attachment is removed from unmap_bo_from_gpuvm,
> it is now called explicitly on the error path when mapping to GPUs
> fails, and before freeing the mem attachments to cover the case where
> unmapping from GPUs failed.
>
> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
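
For context, a minimal sketch of the ordering this patch establishes in
kfd_ioctl_unmap_memory_from_gpu(), condensed from the chardev hunk
below; error handling and the preceding unmap/sync steps are elided:

	mutex_lock(&p->mutex);
	/* ... unmap from GPUs and wait for page table updates ... */

	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;

		/* Invalidate the GPU TLBs first so no stale
		 * translations to the DMA addresses remain.
		 */
		if (flush_tlb)
			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);

		/* Only then tear down the DMA mapping. In the reverse
		 * order, an in-flight access through a stale TLB entry
		 * could hit an already-unmapped IOVA and raise an
		 * IOMMU IO_PAGE_FAULT.
		 */
		amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
	}

	mutex_unlock(&p->mutex);

Keeping both steps under p->mutex is what serializes this against a
concurrent map or unmap of the same memory from another thread.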
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h   |  1 +
>  .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 26 ++++++++++++++++---
>  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c     | 20 ++++++++------
>  3 files changed, 35 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
> index 559f14cc0a99..609a6fefd85f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
> @@ -304,6 +304,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
>  					  struct kgd_mem *mem, void *drm_priv);
>  int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
>  		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
> +void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
>  int amdgpu_amdkfd_gpuvm_sync_memory(
>  		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
>  int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index b5b940485059..7177cd884fe3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -731,7 +731,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
>  	enum dma_data_direction dir;
>
>  	if (unlikely(!ttm->sg)) {
> -		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
> +		pr_debug("SG Table of BO is NULL");
>  		return;
>  	}
>
> @@ -1249,8 +1249,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
>  	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
>
>  	amdgpu_sync_fence(sync, bo_va->last_pt_update);
> -
> -	kfd_mem_dmaunmap_attachment(mem, entry);
>  }
>
>  static int update_gpuvm_pte(struct kgd_mem *mem,
> @@ -1305,6 +1303,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
>
>  update_gpuvm_pte_failed:
>  	unmap_bo_from_gpuvm(mem, entry, sync);
> +	kfd_mem_dmaunmap_attachment(mem, entry);
>  	return ret;
>  }
>
> @@ -1910,8 +1909,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
>  		mem->va + bo_size * (1 + mem->aql_queue));
>
>  	/* Remove from VM internal data structures */
> -	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
> +	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
> +		kfd_mem_dmaunmap_attachment(mem, entry);
>  		kfd_mem_detach(entry);
> +	}
>
>  	ret = unreserve_bo_and_vms(&ctx, false, false);
>
> @@ -2085,6 +2086,23 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
>  	return ret;
>  }
>
> +void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
> +{
> +	struct kfd_mem_attachment *entry;
> +	struct amdgpu_vm *vm;
> +
> +	vm = drm_priv_to_vm(drm_priv);
> +
> +	mutex_lock(&mem->lock);
> +
> +	list_for_each_entry(entry, &mem->attachments, list) {
> +		if (entry->bo_va->base.vm == vm)
> +			kfd_mem_dmaunmap_attachment(mem, entry);
> +	}
> +
> +	mutex_unlock(&mem->lock);
> +}
> +
>  int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
>  		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
>  {
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> index 65d2b9ae16bb..06988cf1db51 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> @@ -1432,17 +1432,21 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
>  			goto sync_memory_failed;
>  		}
>  	}
> -	mutex_unlock(&p->mutex);
>
> -	if (flush_tlb) {
> -		/* Flush TLBs after waiting for the page table updates to complete */
> -		for (i = 0; i < args->n_devices; i++) {
> -			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
> -			if (WARN_ON_ONCE(!peer_pdd))
> -				continue;
> +	/* Flush TLBs after waiting for the page table updates to complete */
> +	for (i = 0; i < args->n_devices; i++) {
> +		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
> +		if (WARN_ON_ONCE(!peer_pdd))
> +			continue;
> +		if (flush_tlb)
>  			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
> -		}
> +
> +		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
> +		amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
>  	}
> +
> +	mutex_unlock(&p->mutex);
> +
>  	kfree(devices_arr);
>
>  	return 0;
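
For completeness, the other two call sites the commit message refers to
look like this after the patch (condensed from the gpuvm hunks above):

	/* map_bo_to_gpuvm() error path: undo the partial mapping */
update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
	kfd_mem_dmaunmap_attachment(mem, entry);
	return ret;

	/* amdgpu_amdkfd_gpuvm_free_memory_of_gpu(): dma-unmap each
	 * attachment before it is detached and freed, which covers
	 * BOs whose unmap from the GPUs failed.
	 */
	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
		kfd_mem_dmaunmap_attachment(mem, entry);
		kfd_mem_detach(entry);
	}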