[PATCH v3] drm/amdkfd: Use partial mapping in GPU page faults

Chen, Xiaogang xiaogang.chen at amd.com
Sun Oct 29 18:39:57 UTC 2023


On 10/23/2023 6:08 PM, Felix Kuehling wrote:
> On 2023-10-20 17:53, Xiaogang.Chen wrote:
>> From: Xiaogang Chen <xiaogang.chen at amd.com>
>>
>> After a partial migration to recover from a GPU page fault, this patch
>> maps into the GPU VM only the same page range that was migrated,
>> instead of mapping all pages of the svm range in which the page fault
>> happened.
>>
>> Signed-off-by: Xiaogang Chen <Xiaogang.Chen at amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 29 ++++++++++++++++++++--------
>>   1 file changed, 21 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> index 54af7a2b29f8..3a71d04779b1 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
>> @@ -1619,6 +1619,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
>>    * 5. Release page table (and SVM BO) reservation
>>    */
>>   static int svm_range_validate_and_map(struct mm_struct *mm,
>> +                      unsigned long map_start, unsigned long map_last,
>>                         struct svm_range *prange, int32_t gpuidx,
>>                         bool intr, bool wait, bool flush_tlb)
>>   {
>> @@ -1699,6 +1700,8 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>>       end = (prange->last + 1) << PAGE_SHIFT;
>>       for (addr = start; !r && addr < end; ) {
>>           struct hmm_range *hmm_range;
>> +        unsigned long map_start_vma;
>> +        unsigned long map_last_vma;
>>           struct vm_area_struct *vma;
>>           uint64_t vram_pages_vma;
>>           unsigned long next = 0;
>> @@ -1747,9 +1750,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>>               r = -EAGAIN;
>>           }
>>
>> -        if (!r)
>> -            r = svm_range_map_to_gpus(prange, offset, npages, readonly,
>> -                          ctx->bitmap, wait, flush_tlb);
>> +        if (!r) {
>> +            map_start_vma = max(map_start, prange->start + offset);
>> +            map_last_vma = min(map_last, prange->start + offset + npages - 1);
>> +            if (map_start_vma <= map_last_vma) {
>> +                offset = map_start_vma - prange->start;
>> +                npages = map_last_vma - map_start_vma + 1;
>> +                r = svm_range_map_to_gpus(prange, offset, npages, readonly,
>> +                              ctx->bitmap, wait, flush_tlb);
>> +            }
>> +        }
>>             if (!r && next == end)
>>               prange->mapped_to_gpu = true;
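
To illustrate the clamping above: it intersects the migrated page range
[map_start, map_last] with the VMA chunk [prange->start + offset,
prange->start + offset + npages - 1] that this loop iteration covers.
Below is a minimal user-space sketch of the same arithmetic, with
hypothetical page numbers chosen only for illustration (not kernel
code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical values: an svm range starting at page 0x1000, a
	 * VMA chunk of 0x40 pages at offset 0x20 into the range, and a
	 * migrated range of 8 pages inside that chunk.
	 */
	unsigned long prange_start = 0x1000;
	unsigned long offset = 0x20;
	unsigned long npages = 0x40;
	unsigned long map_start = 0x1030;
	unsigned long map_last = 0x1037;

	/* Same clamp as the patch: intersect the two page intervals. */
	unsigned long map_start_vma = map_start > prange_start + offset ?
			map_start : prange_start + offset;
	unsigned long map_last_vma =
			map_last < prange_start + offset + npages - 1 ?
			map_last : prange_start + offset + npages - 1;

	if (map_start_vma <= map_last_vma)
		printf("map [0x%lx 0x%lx] offset 0x%lx npages 0x%lx\n",
		       map_start_vma, map_last_vma,
		       map_start_vma - prange_start,
		       map_last_vma - map_start_vma + 1);
	else
		printf("chunk outside migrated range, nothing to map\n");

	return 0;
}

When a chunk lies entirely outside [map_start, map_last] the
intersection is empty and svm_range_map_to_gpus() is skipped for that
chunk, which is what the map_start_vma <= map_last_vma check guards.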
>> @@ -1855,8 +1865,8 @@ static void svm_range_restore_work(struct work_struct *work)
>>            */
>>           mutex_lock(&prange->migrate_mutex);
>>
>> -        r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
>> -                           false, true, false);
>> +        r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
>> +                           MAX_GPU_INSTANCE, false, true, false);
>>           if (r)
>>               pr_debug("failed %d to map 0x%lx to gpus\n", r,
>>                    prange->start);
>> @@ -3069,6 +3079,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>>       kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
>>                          write_fault, timestamp);
>>
>> +    start = prange->start;
>> +    last = prange->last;
>
> This means page faults that don't migrate will map the whole range. 
> Should we move the proper assignment of start and last out of the 
> condition below, so it applies equally to page faults that migrate and 
> those that don't?

This case means the buffer is already in the right place, but it is not 
mapped to the GPU VM for some reason. I think we should do partial 
mapping in this case too. Since this patch was reverted, I will do that 
in the next version.
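
For reference, here is a rough, untested sketch of that direction for 
the next version: hoist the granularity-aligned start/last computation 
in svm_range_restore_pages() above the migration check, so faults that 
do not migrate also map only the faulting granule. It reuses the 
alignment the migration path already does; the exact clamping shown 
here is an assumption, not the final patch:

	/* Sketch only: align start/last to the migration granularity
	 * unconditionally instead of defaulting to the whole prange.
	 */
	size = 1UL << prange->granularity;
	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);

	if (prange->actual_loc != 0 || best_loc != 0)
		migration = true;	/* migration path otherwise unchanged */

	r = svm_range_validate_and_map(mm, start, last, prange, gpuidx,
				       false, false, false);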

Regards

Xiaogang

>
> Regards,
>   Felix
>
>
>>       if (prange->actual_loc != 0 || best_loc != 0) {
>>           migration = true;
>>           /* Align migration range start and size to granularity size */
>> @@ -3102,10 +3114,11 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
>>           }
>>       }
>>
>> -    r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
>> +    r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
>> +                       false, false);
>>       if (r)
>>           pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
>> -             r, svms, prange->start, prange->last);
>> +             r, svms, start, last);
>>         kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
>>                        migration);
>> @@ -3650,7 +3663,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
>>           flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
>>
>> -        r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
>> +        r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, MAX_GPU_INSTANCE,
>>                              true, true, flush_tlb);
>>           if (r)
>>               pr_debug("failed %d to map svm range\n", r);
