<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
  </head>
  <body>
    <div class="moz-cite-prefix">On 2022-03-31 10:37, Felix Kuehling
      wrote:<br>
    </div>
    <blockquote type="cite" cite="mid:03623d90-5e39-a2fb-1068-db15c592f9f9@amd.com">Am
      2022-03-31 um 02:27 schrieb Christian König:
      <br>
      <blockquote type="cite">Am 30.03.22 um 22:51 schrieb philip yang:
        <br>
        <blockquote type="cite">
          <br>
          <br>
          On 2022-03-30 05:00, Christian König wrote:
          <br>
          <blockquote type="cite">Testing the valid bit is not enough to
            figure out if we
            <br>
            need to invalidate the TLB or not.
            <br>
            <br>
            During eviction it is quite likely that we move a BO from
            VRAM to GTT and
            <br>
            update the page tables immediately to the new GTT address.
            <br>
            <br>
            Rework the whole function to get all the necessary
            parameters directly as
            <br>
            value.
            <br>
            <br>
            Signed-off-by: Christian
            König<a class="moz-txt-link-rfc2396E" href="mailto:christian.koenig@amd.com"><christian.koenig@amd.com></a>
            <br>
            ---
            <br>
              drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 63
            ++++++++++++++------------
            <br>
              drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 15 +++---
            <br>
              drivers/gpu/drm/amd/amdkfd/kfd_svm.c   | 18 ++++----
            <br>
              3 files changed, 48 insertions(+), 48 deletions(-)
            <br>
            <br>
            diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
            b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
            <br>
            index 9992a7311387..1cac90ee3318 100644
            <br>
            --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
            <br>
            +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
            <br>
            @@ -793,18 +793,19 @@ static void
            amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
            <br>
              }
            <br>
                /**
            <br>
            - * amdgpu_vm_bo_update_mapping - update a mapping in the vm
            page table
            <br>
            + * amdgpu_vm_update_range - update a range in the vm page
            table
            <br>
               *
            <br>
            - * @adev: amdgpu_device pointer of the VM
            <br>
            - * @bo_adev: amdgpu_device pointer of the mapped BO
            <br>
            - * @vm: requested vm
            <br>
            + * @adev: amdgpu_device pointer to use for commands
            <br>
            + * @vm: the VM to update the range
            <br>
               * @immediate: immediate submission in a page fault
            <br>
               * @unlocked: unlocked invalidation during MM callback
            <br>
            + * @flush_tlb: trigger tlb invalidation after update
            completed
            <br>
               * @resv: fences we need to sync to
            <br>
               * @start: start of mapped range
            <br>
               * @last: last mapped entry
            <br>
               * @flags: flags for the entries
            <br>
               * @offset: offset into nodes and pages_addr
            <br>
            + * @vram_base: base for vram mappings
            <br>
               * @res: ttm_resource to map
            <br>
               * @pages_addr: DMA addresses to use for mapping
            <br>
               * @fence: optional resulting fence
            <br>
            @@ -812,17 +813,14 @@ static void
            amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
            <br>
               * Fill in the page table entries between @start and
            @last.
            <br>
               *
            <br>
               * Returns:
            <br>
            - * 0 for success, -EINVAL for failure.
            <br>
            + * 0 for success, negative erro code for failure.
            <br>
               */
            <br>
            -int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
            <br>
            -                struct amdgpu_device *bo_adev,
            <br>
            -                struct amdgpu_vm *vm, bool immediate,
            <br>
            -                bool unlocked, struct dma_resv *resv,
            <br>
            -                uint64_t start, uint64_t last,
            <br>
            -                uint64_t flags, uint64_t offset,
            <br>
            -                struct ttm_resource *res,
            <br>
            -                dma_addr_t *pages_addr,
            <br>
            -                struct dma_fence **fence)
            <br>
            +int amdgpu_vm_update_range(struct amdgpu_device *adev,
            struct amdgpu_vm *vm,
            <br>
            +               bool immediate, bool unlocked, bool
            flush_tlb,
            <br>
            +               struct dma_resv *resv, uint64_t start,
            uint64_t last,
            <br>
            +               uint64_t flags, uint64_t offset, uint64_t
            vram_base,
            <br>
            +               struct ttm_resource *res, dma_addr_t
            *pages_addr,
            <br>
            +               struct dma_fence **fence)
            <br>
              {
            <br>
                  struct amdgpu_vm_update_params params;
            <br>
                  struct amdgpu_vm_tlb_seq_cb *tlb_cb;
            <br>
            @@ -910,8 +908,7 @@ int amdgpu_vm_bo_update_mapping(struct
            amdgpu_device *adev,
            <br>
                          }
            <br>
                        } else if (flags & (AMDGPU_PTE_VALID |
            AMDGPU_PTE_PRT)) {
            <br>
            -            addr = bo_adev->vm_manager.vram_base_offset
            +
            <br>
            -                cursor.start;
            <br>
            +            addr = vram_base + cursor.start;
            <br>
                      } else {
            <br>
                          addr = 0;
            <br>
                      }
            <br>
            @@ -927,7 +924,7 @@ int amdgpu_vm_bo_update_mapping(struct
            amdgpu_device *adev,
            <br>
                    r = vm->update_funcs->commit(&params,
            fence);
            <br>
              -    if (!(flags & AMDGPU_PTE_VALID) ||
            params.table_freed) {
            <br>
            +    if (flush_tlb || params.table_freed) {
            <br>
          </blockquote>
          <br>
All changes look good to me, but when I look at the previous
changes again, kfd_ioctl_map_memory_to_gpu is not correct now,
as it should flush the TLB if (!kfd_flush_tlb_after_unmap(dev)).
          <br>
          <br>
        </blockquote>
        <br>
        That was intentionally dropped as Felix said it wouldn't be
        necessary any more with the TLB flush rework.
        <br>
        <br>
        Is that really causing any trouble?
        <br>
      </blockquote>
      <br>
      I discussed it with Philip offline. The TLB flushing in
      kfd_ioctl_map_memory_to_gpu is still there, but it is no longer
      conditional on !kfd_flush_tlb_after_unmap. Instead kfd_flush_tlb
      checks the sequence number to find out if the flush is needed
      (presumably because we didn't flush after unmap).
      <br>
      <br>
There is one case on Vega20+XGMI where PTEs get inadvertently
cached in the L2 texture cache. In that case, we probably need to
flush more frequently because a cache line in L2 may contain stale
invalid PTEs. So kfd_flush_tlb would need to ignore the sequence
number and do a heavy-weight TLB flush unconditionally in this case.
      <br>
    </blockquote>
    Thanks for the explanation.<br>
    <br>
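<p>Just to make sure I read the rework correctly, the sequence-number check
in kfd_flush_tlb would look roughly like the sketch below. This is only my
understanding from memory, not the actual code; the pdd field name and the
helpers around it are assumptions:</p>
<pre>/* Sketch only -- field and helper names are assumptions from memory. */
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);

	/*
	 * Nothing touched the page tables since the last flush for this
	 * device, so the TLB cannot hold stale valid entries and the flush
	 * can be skipped. The Vega20+XGMI case would have to bypass this
	 * early return and always do a heavy-weight flush, since the L2
	 * texture cache may still hold stale PTEs.
	 */
	if (atomic64_read(&pdd->tlb_seq) == tlb_seq)
		return;
	atomic64_set(&pdd->tlb_seq, tlb_seq);

	/* ... issue the actual legacy or heavy-weight TLB flush here ... */
}</pre>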
    One nitpick below, this patch is<br>
    Reviewed-by: Philip Yang <a class="moz-txt-link-rfc2396E" href="mailto:Philip.Yang@amd.com"><Philip.Yang@amd.com></a><br>
    <blockquote type="cite" cite="mid:03623d90-5e39-a2fb-1068-db15c592f9f9@amd.com">
      <br>
      Regards,
      <br>
        Felix
      <br>
      <br>
      <blockquote type="cite">
        <br>
        Christian.
        <br>
        <br>
        <blockquote type="cite">To fix it, seems we need beow change,
          then pass flush_tlb flag via kfd_ioctl_map_memory_to_gpu ->
          map_bo_to_gpuvm -> update_gpuvm_pte ->
          amdgpu_vm_bo_update
          <br>
          <br>
          -int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct
          amdgpu_bo_va *bo_va,
          <br>
                      bool clear)
          <br>
          <br>
          -    bool flush_tlb = clear;
          <br>
          <br>
          +int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct
          amdgpu_bo_va *bo_va,
          <br>
                      bool clear, bool flush_tlb)
          <br>
          <br>
          + flush_tlb |= clear;
          <br>
          <br>
          Regards,
          <br>
          <br>
          Philip
          <br>
          <br>
          <blockquote type="cite">          tlb_cb->vm = vm;
            <br>
                      if (!fence || !*fence ||
            <br>
                dma_fence_add_callback(*fence, &tlb_cb->cb,
            <br>
@@ -1010,9 +1007,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
            <br>
                  dma_addr_t *pages_addr = NULL;
            <br>
                  struct ttm_resource *mem;
            <br>
                  struct dma_fence **last_update;
            <br>
            +    bool flush_tlb = clear;
            <br>
                  struct dma_resv *resv;
            <br>
            +    uint64_t vram_base;
            <br>
                  uint64_t flags;
            <br>
            -    struct amdgpu_device *bo_adev = adev;
            <br>
                  int r;
            <br>
                    if (clear || !bo) {
            <br>
@@ -1037,14 +1035,18 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
            <br>
                  }
            <br>
                    if (bo) {
            <br>
            +        struct amdgpu_device *bo_adev = adev;
            <br>
          </blockquote>
        </blockquote>
      </blockquote>
    </blockquote>
    <p>bo_adev is assigned below before it is used, so the initialization to adev is not needed.</p>
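    <p>I.e. something like this, using only what is already in the patch (just to illustrate the nitpick, untested):</p>
<pre>	if (bo) {
		struct amdgpu_device *bo_adev;

		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);

		if (amdgpu_bo_encrypted(bo))
			flags |= AMDGPU_PTE_TMZ;

		/* bo_adev is always assigned here, no initializer needed */
		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
		vram_base = bo_adev->vm_manager.vram_base_offset;
	} else {
		flags = 0x0;
		vram_base = 0;
	}</pre>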
    <p>Regards,</p>
    <p>Philip<br>
    </p>
    <blockquote type="cite" cite="mid:03623d90-5e39-a2fb-1068-db15c592f9f9@amd.com">
      <blockquote type="cite">
        <blockquote type="cite">
          <blockquote type="cite">+
            <br>
            flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
<br>
              if (amdgpu_bo_encrypted(bo))
<br>
                  flags |= AMDGPU_PTE_TMZ;
<br>
              bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
<br>
+        vram_base = bo_adev->vm_manager.vram_base_offset;
<br>
          } else {
<br>
              flags = 0x0;
<br>
+        vram_base = 0;
<br>
          }
<br>
            if (clear || (bo && bo->tbo.base.resv ==
<br>
@@ -1054,7 +1056,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
<br>
          last_update = &bo_va->last_pt_update;
<br>
        if (!clear && bo_va->base.moved) {
<br>
-        bo_va->base.moved = false;
<br>
+        flush_tlb = true;
<br>
          list_splice_init(&bo_va->valids, &bo_va->invalids);
<br>
        } else if (bo_va->cleared != clear) {
<br>
@@ -1077,11 +1079,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
<br>
            trace_amdgpu_vm_bo_update(mapping);
<br>
  -        r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
<br>
-                        resv, mapping->start,
<br>
-                        mapping->last, update_flags,
<br>
-                        mapping->offset, mem,
<br>
-                        pages_addr, last_update);
<br>
+        r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
<br>
+                       resv, mapping->start, mapping->last,
<br>
+                       update_flags, mapping->offset,
<br>
+                       vram_base, mem, pages_addr,
<br>
+                       last_update);
<br>
          if (r)
<br>
              return r;
<br>
      }
<br>
@@ -1104,6 +1106,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
<br>
        list_splice_init(&bo_va->invalids, &bo_va->valids);
<br>
      bo_va->cleared = clear;
<br>
+    bo_va->base.moved = false;
<br>
        if (trace_amdgpu_vm_bo_mapping_enabled()) {
<br>
          list_for_each_entry(mapping, &bo_va->valids, list)
<br>
@@ -1272,10 +1275,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
<br>
              mapping->start < AMDGPU_GMC_HOLE_START)
<br>
              init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
<br>
  -        r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
<br>
-                        resv, mapping->start,
<br>
-                        mapping->last, init_pte_value,
<br>
-                        0, NULL, NULL, &f);
<br>
+        r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
<br>
+                       mapping->start, mapping->last,
<br>
+                       init_pte_value, 0, 0, NULL, NULL,
<br>
+                       &f);
<br>
          amdgpu_vm_free_mapping(adev, vm, mapping, f);
<br>
          if (r) {
<br>
              dma_fence_put(f);
<br>
@@ -2519,8 +2522,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
<br>
          goto error_unlock;
<br>
      }
<br>
  -    r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
<br>
-                    addr, flags, value, NULL, NULL, NULL);
<br>
+    r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
<br>
+                   addr, flags, value, 0, NULL, NULL, NULL);
            <br>
                  if (r)
            <br>
                      goto error_unlock;
            <br>
  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
<br>
index 6b7682fe84f8..1a814fbffff8 100644
<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
<br>
@@ -402,15 +402,12 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
<br>
                 struct amdgpu_vm *vm);
<br>
  void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
<br>
                  struct amdgpu_vm *vm, struct amdgpu_bo *bo);
<br>
-int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
<br>
-                struct amdgpu_device *bo_adev,
<br>
-                struct amdgpu_vm *vm, bool immediate,
<br>
-                bool unlocked, struct dma_resv *resv,
<br>
-                uint64_t start, uint64_t last,
<br>
-                uint64_t flags, uint64_t offset,
<br>
-                struct ttm_resource *res,
<br>
-                dma_addr_t *pages_addr,
<br>
-                struct dma_fence **fence);
<br>
+int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
<br>
+               bool immediate, bool unlocked, bool flush_tlb,
<br>
+               struct dma_resv *resv, uint64_t start, uint64_t last,
<br>
+               uint64_t flags, uint64_t offset, uint64_t vram_base,
<br>
+               struct ttm_resource *res, dma_addr_t *pages_addr,
<br>
+               struct dma_fence **fence);
            <br>
              int amdgpu_vm_bo_update(struct amdgpu_device *adev,
            <br>
                          struct amdgpu_bo_va *bo_va,
            <br>
                          bool clear);
            <br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
<br>
index 27533f6057e0..907b02045824 100644
<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
<br>
@@ -1188,9 +1188,9 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
<br>
      pr_debug("[0x%llx 0x%llx]\n", start, last);
<br>
  -    return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
<br>
-                       start, last, init_pte_value, 0,
<br>
-                       NULL, NULL, fence);
<br>
+    return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
<br>
+                      last, init_pte_value, 0, 0, NULL, NULL,
<br>
+                      fence);
<br>
  }
<br>
    static int
<br>
@@ -1277,12 +1277,12 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
<br>
           (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
<br>
           pte_flags);
<br>
  -        r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
<br>
-                        NULL, last_start,
<br>
-                        prange->start + i, pte_flags,
<br>
-                        last_start - prange->start,
<br>
-                        NULL, dma_addr,
<br>
-                        &vm->last_update);
<br>
+        r = amdgpu_vm_update_range(adev, vm, false, false, false, NULL,
<br>
+                       last_start, prange->start + i,
<br>
+                       pte_flags,
<br>
+                       last_start - prange->start,
<br>
+                       bo_adev->vm_manager.vram_base_offset,
<br>
+                       NULL, dma_addr, &vm->last_update);
<br>
            for (j = last_start - prange->start; j <= i; j++)
            <br>
                          dma_addr[j] |= last_domain;
            <br>
          </blockquote>
        </blockquote>
        <br>
      </blockquote>
    </blockquote>
  </body>
</html>