<!doctype html>
<html lang="en"><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>Re: [PATCH] Track unified memory when xnack is off</title>
  </head>
  <body>
    <p><br>
    </p>
    <div class="moz-cite-prefix">On 2022-05-18 17:40, Felix Kuehling
      wrote:<br>
    </div>
    <blockquote type="cite" cite="mid:55882ef2-9dc7-36cb-1d60-2c868ea1c14c@amd.com">
      <br>
      On 2022-05-18 14:36, philip yang wrote:
      <br>
      <blockquote type="cite">
        <br>
        <br>
        On 2022-05-17 19:11, Alex Sierra wrote:
        <br>
        <blockquote type="cite">[WHY]
          <br>
          Unified memory with xnack off should be tracked, as userptr
          mappings
          <br>
          and legacy allocations do, to avoid oversubscribing system memory
          when
          <br>
          xnack is off.
          <br>
          [HOW]
          <br>
          Exposing functions reserve_mem_limit and unreserve_mem_limit
          to SVM
          <br>
          API and call them on every prange creation and free.
          <br>
          <br>
          Signed-off-by: Alex Sierra <a class="moz-txt-link-rfc2396E" href="mailto:alex.sierra@amd.com"><alex.sierra@amd.com></a>
          <br>
          ---
          <br>
            drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h    |  4 +++
          <br>
            .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 24
          +++++++------
          <br>
            drivers/gpu/drm/amd/amdkfd/kfd_svm.c          | 34
          ++++++++++++++-----
          <br>
            3 files changed, 43 insertions(+), 19 deletions(-)
          <br>
          <br>
          diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
          b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
          <br>
          index f8b9f27adcf5..f55f34af6480 100644
          <br>
          --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
          <br>
          +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
          <br>
          @@ -301,6 +301,10 @@ bool
          amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev,
          struct kgd_mem *
          <br>
            void amdgpu_amdkfd_block_mmu_notifications(void *p);
          <br>
            int amdgpu_amdkfd_criu_resume(void *p);
          <br>
            bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct
          amdgpu_device *adev);
          <br>
          +int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device
          *adev,
          <br>
          +        uint64_t size, u32 alloc_flag);
          <br>
          +void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device
          *adev,
          <br>
          +        uint64_t size, u32 alloc_flag);
          <br>
              #if IS_ENABLED(CONFIG_HSA_AMD)
          <br>
            void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
          <br>
          diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
          b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
          <br>
          index e985cf9c7ec0..b2236159ff39 100644
          <br>
          --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
          <br>
          +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
          <br>
          @@ -122,7 +122,7 @@ void
          amdgpu_amdkfd_reserve_system_mem(uint64_t size)
          <br>
             *
          <br>
             * Return: returns -ENOMEM in case of error, ZERO otherwise
          <br>
             */
          <br>
          -static int amdgpu_amdkfd_reserve_mem_limit(struct
          amdgpu_device *adev,
          <br>
          +int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device
          *adev,
          <br>
                    uint64_t size, u32 alloc_flag)
          <br>
            {
          <br>
                uint64_t reserved_for_pt =
          <br>
          @@ -157,8 +157,8 @@ static int
          amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
          <br>
                     kfd_mem_limit.max_system_mem_limit &&
          !no_system_mem_limit) ||
          <br>
                    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
          <br>
                     kfd_mem_limit.max_ttm_mem_limit) ||
          <br>
          -        (adev->kfd.vram_used + vram_needed >
          <br>
          -         adev->gmc.real_vram_size - reserved_for_pt)) {
          <br>
          +        (adev && (adev->kfd.vram_used +
          vram_needed >
          <br>
          +         adev->gmc.real_vram_size - reserved_for_pt))) {
          <br>
                    ret = -ENOMEM;
          <br>
                    goto release;
          <br>
                }
          <br>
          @@ -166,7 +166,8 @@ static int
          amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
          <br>
                /* Update memory accounting by decreasing available
          system
          <br>
                 * memory, TTM memory and GPU memory as computed above
          <br>
                 */
          <br>
          -    adev->kfd.vram_used += vram_needed;
          <br>
          +    if (adev)
          <br>
          +        adev->kfd.vram_used += vram_needed;
          <br>
                kfd_mem_limit.system_mem_used += system_mem_needed;
          <br>
                kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
          <br>
            @@ -175,7 +176,7 @@ static int
          amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
          <br>
                return ret;
          <br>
            }
          <br>
            -static void unreserve_mem_limit(struct amdgpu_device *adev,
          <br>
          +void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device
          *adev,
          <br>
                    uint64_t size, u32 alloc_flag)
          <br>
            {
          <br>
                spin_lock(&kfd_mem_limit.mem_limit_lock);
          <br>
          @@ -184,7 +185,8 @@ static void unreserve_mem_limit(struct
          amdgpu_device *adev,
          <br>
                    kfd_mem_limit.system_mem_used -= size;
          <br>
                    kfd_mem_limit.ttm_mem_used -= size;
          <br>
                } else if (alloc_flag &
          KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
          <br>
          -        adev->kfd.vram_used -= size;
          <br>
          +        if (adev)
          <br>
          +            adev->kfd.vram_used -= size;
          <br>
                } else if (alloc_flag &
          KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
          <br>
                    kfd_mem_limit.system_mem_used -= size;
          <br>
                } else if (!(alloc_flag &
          <br>
          @@ -193,9 +195,9 @@ static void unreserve_mem_limit(struct
          amdgpu_device *adev,
          <br>
                    pr_err("%s: Invalid BO type %#x\n", __func__,
          alloc_flag);
          <br>
                    goto release;
          <br>
                }
          <br>
          -
          <br>
          -    WARN_ONCE(adev->kfd.vram_used < 0,
          <br>
          -          "KFD VRAM memory accounting unbalanced");
          <br>
          +    if (adev)
          <br>
          +        WARN_ONCE(adev->kfd.vram_used < 0,
          <br>
          +            "KFD VRAM memory accounting unbalanced");
          <br>
                WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
          <br>
                      "KFD TTM memory accounting unbalanced");
          <br>
                WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
          <br>
          @@ -211,7 +213,7 @@ void amdgpu_amdkfd_release_notify(struct
          amdgpu_bo *bo)
          <br>
                u32 alloc_flags = bo->kfd_bo->alloc_flags;
          <br>
                u64 size = amdgpu_bo_size(bo);
          <br>
            -    unreserve_mem_limit(adev, size, alloc_flags);
          <br>
          +    amdgpu_amdkfd_unreserve_mem_limit(adev, size,
          alloc_flags);
          <br>
                  kfree(bo->kfd_bo);
          <br>
            }
          <br>
          @@ -1601,7 +1603,7 @@ int
          amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
          <br>
                /* Don't unreserve system mem limit twice */
          <br>
                goto err_reserve_limit;
          <br>
            err_bo_create:
          <br>
          -    unreserve_mem_limit(adev, size, flags);
          <br>
          +    amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
          <br>
            err_reserve_limit:
          <br>
                mutex_destroy(&(*mem)->lock);
          <br>
                if (gobj)
          <br>
          diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
          b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
          <br>
          index 5ed8d9b549a4..e7e9b388fea4 100644
          <br>
          --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
          <br>
          +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
          <br>
          @@ -261,11 +261,21 @@ void svm_range_free_dma_mappings(struct
          svm_range *prange)
          <br>
              static void svm_range_free(struct svm_range *prange)
          <br>
            {
          <br>
          +    uint64_t size = (prange->last - prange->start + 1)
          << PAGE_SHIFT;
          <br>
          +    struct kfd_process *p;
          <br>
          +
          <br>
                pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
          prange->svms, prange,
          <br>
                     prange->start, prange->last);
          <br>
                  svm_range_vram_node_free(prange);
          <br>
                svm_range_free_dma_mappings(prange);
          <br>
          +
          <br>
          +    p = container_of(prange->svms, struct kfd_process,
          svms);
          <br>
          +    if (!p->xnack_enabled) {
          <br>
          +        pr_debug("unreserve mem limit: %lld\n", size);
          <br>
          +        amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
          <br>
          +                    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
          <br>
          +    }
          <br>
                mutex_destroy(&prange->lock);
          <br>
                mutex_destroy(&prange->migrate_mutex);
          <br>
                kfree(prange);
          <br>
          @@ -284,7 +294,7 @@ svm_range_set_default_attributes(int32_t
          *location, int32_t *prefetch_loc,
          <br>
              static struct
          <br>
            svm_range *svm_range_new(struct svm_range_list *svms,
          uint64_t start,
          <br>
          -             uint64_t last)
          <br>
          +             uint64_t last, bool is_new_alloc)
          <br>
            {
          <br>
                uint64_t size = last - start + 1;
          <br>
                struct svm_range *prange;
          <br>
          @@ -293,6 +303,15 @@ svm_range *svm_range_new(struct
          svm_range_list *svms, uint64_t start,
          <br>
                prange = kzalloc(sizeof(*prange), GFP_KERNEL);
          <br>
                if (!prange)
          <br>
                    return NULL;
          <br>
          +
          <br>
          +    p = container_of(svms, struct kfd_process, svms);
          <br>
          +    if (!p->xnack_enabled && is_new_alloc
          &&
          <br>
          +        amdgpu_amdkfd_reserve_mem_limit(NULL, size <<
          PAGE_SHIFT,
          <br>
          +                        KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
          <br>
        </blockquote>
        The range will create an svm_bo to migrate to VRAM, so counting
        acc_size is correct.
        <br>
      </blockquote>
      <br>
      I'm not sure how to understand this comment. The thing is, the
      VRAM BO can be evicted without losing functionality. So I don't
      think we need to track potential VRAM usage of SVM ranges. We only
      need to account for system memory usage. That includes the system
      memory being mapped, and maybe the prange structures and dma_addr
      arrays used for the mapping. However, like I said, the old
      acc_size isn't really suitable for that because it doesn't account
      for mappings on multiple GPUs.
      <br>
      <br>
      The dma address arrays take 1/512 of the memory size, per GPU.
      Even on an 8GPU system, that's only 1/64 of the memory size. So I
      think the 15/16 system memory limit still leaves enough room for
      those data structures. If that gets too tight, we may just decide
      to use a lower system memory limit, or change the system memory
      limit based on the number of GPUs in the system. That would be
      easier than accurately tracking the data structure sizes for each
      allocation and potentially each mapping on a multi-GPU system.
      <br>
    </blockquote>
    <p>Yes — based on this calculation, and since acc_size is no longer
      accurate for mGPUs with IOMMU support and svm ranges, we can remove
      acc_size for svm_bo and only count svm ranges as userptr system
      memory.</p>
    <p>Regards,</p>
    <p>Philip<br>
    </p>
    <blockquote type="cite" cite="mid:55882ef2-9dc7-36cb-1d60-2c868ea1c14c@amd.com">
      <br>
      Regards,
      <br>
        Felix
      <br>
      <br>
      <br>
      <blockquote type="cite">
        <blockquote type="cite">+        pr_err("Failed to allocate
          usrptr memory\n");
          <br>
          +        kfree(prange);
          <br>
          +        return NULL;
          <br>
          +    }
          <br>
                prange->npages = size;
          <br>
                prange->svms = svms;
          <br>
                prange->start = start;
          <br>
          @@ -307,7 +326,6 @@ svm_range *svm_range_new(struct
          svm_range_list *svms, uint64_t start,
          <br>
                mutex_init(&prange->migrate_mutex);
          <br>
                mutex_init(&prange->lock);
          <br>
            -    p = container_of(svms, struct kfd_process, svms);
          <br>
                if (p->xnack_enabled)
          <br>
                    bitmap_copy(prange->bitmap_access,
          svms->bitmap_supported,
          <br>
                            MAX_GPU_INSTANCE);
          <br>
          @@ -1000,9 +1018,9 @@ svm_range_split(struct svm_range
          *prange, uint64_t start, uint64_t last,
          <br>
                  svms = prange->svms;
          <br>
                if (old_start == start)
          <br>
          -        *new = svm_range_new(svms, last + 1, old_last);
          <br>
          +        *new = svm_range_new(svms, last + 1, old_last,
          false);
          <br>
                else
          <br>
          -        *new = svm_range_new(svms, old_start, start - 1);
          <br>
          +        *new = svm_range_new(svms, old_start, start - 1,
          false);
          <br>
                if (!*new)
          <br>
                    return -ENOMEM;
          <br>
            @@ -1825,7 +1843,7 @@ static struct svm_range
          *svm_range_clone(struct svm_range *old)
          <br>
            {
          <br>
                struct svm_range *new;
          <br>
            -    new = svm_range_new(old->svms, old->start,
          old->last);
          <br>
          +    new = svm_range_new(old->svms, old->start,
          old->last, true);
          <br>
        </blockquote>
        <br>
        Cloning a prange is not a new memory allocation, so use false.
        <br>
        <br>
        Regards,
        <br>
        <br>
        Philip
        <br>
        <br>
        <blockquote type="cite">      if (!new)
          <br>
                    return NULL;
          <br>
            @@ -1951,7 +1969,7 @@ svm_range_add(struct kfd_process *p,
          uint64_t start, uint64_t size,
          <br>
                      /* insert a new node if needed */
          <br>
                    if (node->start > start) {
          <br>
          -            prange = svm_range_new(svms, start,
          node->start - 1);
          <br>
          +            prange = svm_range_new(svms, start,
          node->start - 1, true);
          <br>
                        if (!prange) {
          <br>
                            r = -ENOMEM;
          <br>
                            goto out;
          <br>
          @@ -1967,7 +1985,7 @@ svm_range_add(struct kfd_process *p,
          uint64_t start, uint64_t size,
          <br>
                  /* add a final range at the end if needed */
          <br>
                if (start <= last) {
          <br>
          -        prange = svm_range_new(svms, start, last);
          <br>
          +        prange = svm_range_new(svms, start, last, true);
          <br>
                    if (!prange) {
          <br>
                        r = -ENOMEM;
          <br>
                        goto out;
          <br>
          @@ -2585,7 +2603,7 @@ svm_range
          *svm_range_create_unregistered_range(struct amdgpu_device
          *adev,
          <br>
                    last = addr;
          <br>
                }
          <br>
            -    prange = svm_range_new(&p->svms, start, last);
          <br>
          +    prange = svm_range_new(&p->svms, start, last,
          true);
          <br>
                if (!prange) {
          <br>
                    pr_debug("Failed to create prange in address
          [0x%llx]\n", addr);
          <br>
                    return NULL;
          <br>
        </blockquote>
      </blockquote>
    </blockquote>
  </body>
</html>