    <div class="moz-cite-prefix">On 2023-10-02 13:06, Chen, Xiaogang
      wrote:<br>
    </div>
    <blockquote type="cite" cite="mid:af89092e-663e-6487-9c0e-048ccd715ed0@amd.com">
      <br>
      On 9/29/2023 9:11 AM, Philip Yang wrote:
      <br>
      <blockquote type="cite">Caution: This message originated from an
        External Source. Use proper caution when opening attachments,
        clicking links, or responding.
        <br>
        <br>
        <br>
>> Align unmap range start and last address to granularity boundary.
>> Skip unmap if the range is already unmapped from GPUs.
>>
>> This also solves the rocgdb CWSR migration related issue.
>>
>> Signed-off-by: Philip Yang <Philip.Yang@amd.com>
>> ---
>>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 35 ++++++++++++++++++++++++----
>>  1 file changed, 31 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> index 626e0dd4ec79..ac65bf25c685 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> @@ -2004,6 +2004,26 @@ static void svm_range_restore_work(struct work_struct *work)
>>  	mmput(mm);
>>  }
>>
>> +static unsigned long
>> +svm_range_align_start(struct svm_range *prange, unsigned long start)
>> +{
>> +	unsigned long start_align;
>> +
>> +	start_align = ALIGN_DOWN(start, 1UL << prange->granularity);
>> +	start_align = max_t(unsigned long, start_align, prange->start);
>> +	return start_align;
>> +}
>> +
>> +static unsigned long
>> +svm_range_align_last(struct svm_range *prange, unsigned long last)
>> +{
>> +	unsigned long last_align;
>> +
>> +	last_align = ALIGN(last, 1UL << prange->granularity) - 1;
>
> Should this be ALIGN(last + 1, 1UL << prange->granularity) - 1? Here
> last is the inclusive last page number.

Yes, you are right: if evicting range [0, 0x200], we should unmap
range [0x0, 0x3ff].
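
To make the off-by-one concrete, here is a minimal userspace sketch
(re-implementing ALIGN/ALIGN_DOWN for power-of-two alignments rather
than using the kernel macros directly) that evaluates both variants
for this case:

#include <stdio.h>

/* power-of-two alignment helpers, same semantics as the kernel's */
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
#define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long granularity = 9;            /* 1UL << 9 = 512-page granule */
	unsigned long align = 1UL << granularity;
	unsigned long start = 0x0, last = 0x200;  /* evicted range, last inclusive */

	unsigned long buggy = ALIGN(last, align) - 1;      /* 0x1ff, drops page 0x200 */
	unsigned long fixed = ALIGN(last + 1, align) - 1;  /* 0x3ff, covers the granule */

	printf("aligned unmap range [0x%lx 0x%lx]\n",
	       ALIGN_DOWN(start, align), fixed);
	printf("buggy last 0x%lx, fixed last 0x%lx\n", buggy, fixed);
	return 0;
}

With last = 0x200 the unfixed expression yields 0x1ff, which would
leave page 0x200 mapped; aligning last + 1 up gives 0x3ff as expected.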
    <blockquote type="cite" cite="mid:af89092e-663e-6487-9c0e-048ccd715ed0@amd.com">
      <br>
      Regards
      <br>
      <br>
      Xiaogang
      <br>
      <br>
      <blockquote type="cite">+       last_align = min_t(unsigned long,
        last_align, prange->last);
        <br>
        +       return last_align;
        <br>
        +}
        <br>
        +
        <br>
          /**
        <br>
           * svm_range_evict - evict svm range
        <br>
           * @prange: svm range structure
        <br>
        @@ -2078,6 +2098,12 @@ svm_range_evict(struct svm_range *prange,
        struct mm_struct *mm,
        <br>
                         unsigned long s, l;
        <br>
                         uint32_t trigger;
        <br>
        <br>
        +               if (!svm_range_partial_mapped(prange, start,
        last)) {
        <br>
        +                       pr_debug("svms 0x%p [0x%lx 0x%lx]
        unmapped already\n",
        <br>
        +                               prange->svms, start, last);
        <br>
        +                       return 0;
        <br>
        +               }
        <br>
        +
        <br>
                         if (event == MMU_NOTIFY_MIGRATE)
        <br>
                                 trigger =
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
        <br>
                         else
        <br>
        @@ -2085,16 +2111,17 @@ svm_range_evict(struct svm_range
        *prange, struct mm_struct *mm,
        <br>
        <br>
                         pr_debug("invalidate unmap svms 0x%p [0x%lx
        0x%lx] from GPUs\n",
        <br>
                                  prange->svms, start, last);
        <br>
        +
        <br>
                         list_for_each_entry(pchild,
        &prange->child_list, child_list) {
        <br>
                                 mutex_lock_nested(&pchild->lock,
        1);
        <br>
        -                       s = max(start, pchild->start);
        <br>
        -                       l = min(last, pchild->last);
        <br>
        +                       s = svm_range_align_start(pchild,
        start);
        <br>
        +                       l = svm_range_align_last(pchild, last);
        <br>
                                 if (l >= s)
        <br>
                                        
        svm_range_unmap_from_gpus(pchild, s, l, trigger);
        <br>
                                 mutex_unlock(&pchild->lock);
        <br>
                         }
        <br>
        -               s = max(start, prange->start);
        <br>
        -               l = min(last, prange->last);
        <br>
        +               s = svm_range_align_start(prange, start);
        <br>
        +               l = svm_range_align_last(prange, last);
        <br>
                         if (l >= s)
        <br>
                                 svm_range_unmap_from_gpus(prange, s, l,
        trigger);
        <br>
                 }
        <br>
        --
        <br>
        2.35.1
        <br>
        <br>
      </blockquote>
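
For reference, this is presumably what the helper would look like with
that fix folded in (align the exclusive end last + 1 up to the
granularity boundary, then convert back to an inclusive page number;
otherwise identical to the patch, untested):

static unsigned long
svm_range_align_last(struct svm_range *prange, unsigned long last)
{
	unsigned long last_align;

	/* last is inclusive: align last + 1 up, then subtract 1 */
	last_align = ALIGN(last + 1, 1UL << prange->granularity) - 1;
	last_align = min_t(unsigned long, last_align, prange->last);
	return last_align;
}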