<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
  </head>
  <body>
    <div class="moz-cite-prefix">On 2024-02-23 08:42, Shashank Sharma
      wrote:<br>
    </div>
    <blockquote type="cite" cite="mid:20240223134206.899-3-shashank.sharma@amd.com">
      <pre class="moz-quote-pre" wrap="">This patch:
- adds a new list in amdgpu_vm to hold the VM PT entries being freed
- waits for the TLB flush using the vm->tlb_flush_fence
- actually frees the PT BOs

V2: rebase
V3: Do not attach the tlb_fence to the entries, rather add the entries
    to a list and delay their freeing (Christian)

Cc: Christian König &lt;Christian.Koenig@amd.com&gt;
Cc: Alex Deucher &lt;alexander.deucher@amd.com&gt;
Cc: Felix Kuehling &lt;felix.kuehling@amd.com&gt;
Cc: Rajneesh Bhardwaj &lt;rajneesh.bhardwaj@amd.com&gt;
Signed-off-by: Shashank Sharma &lt;shashank.sharma@amd.com&gt;
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  6 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h    |  6 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 51 ++++++++++++++++++++---
 3 files changed, 58 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 67c690044b97..eebb73f2c2ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -939,6 +939,10 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                /* Makes sure no PD/PT is freed before the flush */
                dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
                                   DMA_RESV_USAGE_BOOKKEEP);
+
+               mutex_lock(&vm->tlb_fence_lock);
+               vm->tlb_fence_last = *fence;
+               mutex_unlock(&vm->tlb_fence_lock);
        }
 
        amdgpu_res_first(pages_addr ? NULL : res, offset,
@@ -2212,6 +2216,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        INIT_LIST_HEAD(&vm->freed);
        INIT_LIST_HEAD(&vm->done);
        INIT_LIST_HEAD(&vm->pt_freed);
+       INIT_LIST_HEAD(&vm->tlb_flush_waitlist);
        INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
        INIT_KFIFO(vm->faults);
 
@@ -2244,6 +2249,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        vm->last_unlocked = dma_fence_get_stub();
        vm->generation = 0;
 
+       mutex_init(&vm->tlb_fence_lock);
        mutex_init(&vm->eviction_lock);
        vm->evicting = false;
        vm->tlb_fence_context = dma_fence_context_alloc(1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 8e6fd25d07b7..77f10ed80973 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -334,6 +334,10 @@ struct amdgpu_vm {
        uint64_t                *tlb_seq_cpu_addr;
        uint64_t                tlb_fence_context;
 
+       struct mutex            tlb_fence_lock;
+       struct dma_fence        *tlb_fence_last;
+       struct list_head        tlb_flush_waitlist;
+
        atomic64_t              kfd_last_flushed_seq;
 
        /* How many times we had to re-generate the page tables */
@@ -379,6 +383,8 @@ struct amdgpu_vm {
 
        /* cached fault info */
        struct amdgpu_vm_fault_info fault_info;
+
+       int count_bos;
 };
 
 struct amdgpu_vm_manager {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 95dc0afdaffb..57ea95c5c085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -643,13 +643,13 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
        if (!entry->bo)
                return;
 
-       entry->bo->vm_bo = NULL;
        shadow = amdgpu_bo_shadowed(entry->bo);
        if (shadow) {
                ttm_bo_set_bulk_move(&shadow->tbo, NULL);
                amdgpu_bo_unref(&shadow);
        }
        ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
+       entry->bo->vm_bo = NULL;
 
        spin_lock(&entry->vm->status_lock);
        list_del(&entry->vm_status);
@@ -657,6 +657,38 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
        amdgpu_bo_unref(&entry->bo);
 }
 
+static void amdgpu_vm_pt_flush_waitlist(struct amdgpu_vm *vm)
+{
+       struct amdgpu_vm_bo_base *entry, *next;
+       LIST_HEAD(tlb_flush_waitlist);
+
+       if (!vm || list_empty(&vm->tlb_flush_waitlist))
+               return;
+
+       /* Wait for pending TLB flush before freeing PT BOs */
+       mutex_lock(&vm->tlb_fence_lock);
+       if (vm->tlb_fence_last && !dma_fence_is_signaled(vm->tlb_fence_last)) {
+               if (dma_fence_wait_timeout(vm->tlb_fence_last, false,
+                                          MAX_SCHEDULE_TIMEOUT) <= 0) {
+                       DRM_ERROR("Timed out waiting for TLB flush, not freeing PT BOs\n");
+                       mutex_unlock(&vm->tlb_fence_lock);
+                       return;
+               }
+
+               vm->tlb_fence_last = NULL;
+       }
+
+       /* Save the waitlist locally and reset the flushlist */
+       list_splice_init(&vm->tlb_flush_waitlist, &tlb_flush_waitlist);
+       mutex_unlock(&vm->tlb_fence_lock);
+
+       /* Now free the entries */
+       list_for_each_entry_safe(entry, next, &tlb_flush_waitlist, vm_status) {
+               if (entry)
+                       amdgpu_vm_pt_free(entry);
+       }
+}
+
 void amdgpu_vm_pt_free_work(struct work_struct *work)
 {
        struct amdgpu_vm_bo_base *entry, *next;
@@ -673,7 +705,7 @@ void amdgpu_vm_pt_free_work(struct work_struct *work)
        amdgpu_bo_reserve(vm->root.bo, true);
 
        list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
-               amdgpu_vm_pt_free(entry);
+               list_move(&entry->vm_status, &vm->tlb_flush_waitlist);
 
        amdgpu_bo_unreserve(vm->root.bo);
 }
@@ -708,11 +740,17 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
                return;
        }
 
-       for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
-               amdgpu_vm_pt_free(entry);
+       mutex_lock(&vm->tlb_fence_lock);
+
+       for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) {
+               if (entry)
+                       list_add(&entry->vm_status, &vm->tlb_flush_waitlist);
+       }
 
        if (start)
-               amdgpu_vm_pt_free(start->entry);
+               list_add(&start->entry->vm_status, &vm->tlb_flush_waitlist);
+
+       mutex_unlock(&vm->tlb_fence_lock);</pre>
    </blockquote>
    <p>Because the PT BO already has the TLB flush fence attached to
      its reservation object, amdgpu_vm_pt_free() does not actually
      free the BO; once the TLB flush work signals the fence, the PT
      BO is released.</p>
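    <p>A minimal sketch of that existing flow (simplified for
      illustration, not the verbatim kernel code paths; the TTM
      release check is abbreviated):</p>
    <pre wrap="">/* amdgpu_vm_update_range(): the TLB flush fence is published on the
 * root reservation object, which all PD/PT BOs of the VM share.
 */
dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
		   DMA_RESV_USAGE_BOOKKEEP);

/* amdgpu_vm_pt_free(): only drops the last reference. */
amdgpu_bo_unref(&entry->bo);

/* TTM release path (abbreviated): the backing memory is not released
 * until every resv fence, including the BOOKKEEP TLB flush fence
 * added above, has signaled.
 */
if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
	; /* destruction is deferred until the fence signals */</pre>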
    <p>Adding the freed PT BOs to vm->tlb_flush_waitlist and then
      waiting for the TLB flush fence at the end of the mapping
      update is therefore unnecessary; this patch seems
      redundant.</p>
    <p>Regards,</p>
    <p>Philip<br>
    </p>
    <blockquote type="cite" cite="mid:20240223134206.899-3-shashank.sharma@amd.com">
      <pre class="moz-quote-pre" wrap=""> }
 
 /**
@@ -725,6 +763,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
+       amdgpu_vm_pt_flush_waitlist(vm);
 }
 
 /**
@@ -1070,6 +1109,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
                }
        }
 
+       /* Actually free the buffers now */
+       amdgpu_vm_pt_flush_waitlist(params->vm);
        return 0;
 }
 
</pre>
    </blockquote>
  </body>
</html>