[PATCH 08/35] drm/amdgpu: add common HMM get pages function

Christian König ckoenig.leichtzumerken at gmail.com
Thu Jan 7 10:53:08 UTC 2021


On 07.01.21 at 04:01, Felix Kuehling wrote:
> From: Philip Yang <Philip.Yang at amd.com>
>
> Move the HMM get pages function from amdgpu_ttm to amdgpu_mn. This
> common function will be used by the new SVM APIs.
>
> Signed-off-by: Philip Yang <Philip.Yang at amd.com>
> Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>

Acked-by: Christian König <christian.koenig at amd.com>

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  | 83 +++++++++++++++++++++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h  |  7 +++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 76 +++-------------------
>   3 files changed, 100 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> index 828b5167ff12..997da4237a10 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> @@ -155,3 +155,86 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
>   	mmu_interval_notifier_remove(&bo->notifier);
>   	bo->notifier.mm = NULL;
>   }
> +
> +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
> +			       struct mm_struct *mm, struct page **pages,
> +			       uint64_t start, uint64_t npages,
> +			       struct hmm_range **phmm_range, bool readonly,
> +			       bool mmap_locked)
> +{
> +	struct hmm_range *hmm_range;
> +	unsigned long timeout;
> +	unsigned long i;
> +	unsigned long *pfns;
> +	int r = 0;
> +
> +	hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
> +	if (unlikely(!hmm_range))
> +		return -ENOMEM;
> +
> +	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
> +	if (unlikely(!pfns)) {
> +		r = -ENOMEM;
> +		goto out_free_range;
> +	}
> +
> +	hmm_range->notifier = notifier;
> +	hmm_range->default_flags = HMM_PFN_REQ_FAULT;
> +	if (!readonly)
> +		hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
> +	hmm_range->hmm_pfns = pfns;
> +	hmm_range->start = start;
> +	hmm_range->end = start + npages * PAGE_SIZE;
> +	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
> +
> +retry:
> +	hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
> +
> +	if (likely(!mmap_locked))
> +		mmap_read_lock(mm);
> +
> +	r = hmm_range_fault(hmm_range);
> +
> +	if (likely(!mmap_locked))
> +		mmap_read_unlock(mm);
> +	if (unlikely(r)) {
> +		/*
> +		 * FIXME: This timeout should encompass the retry from
> +		 * mmu_interval_read_retry() as well.
> +		 */
> +		if (r == -EBUSY && !time_after(jiffies, timeout))
> +			goto retry;
> +		goto out_free_pfns;
> +	}
> +
> +	/*
> +	 * Due to default_flags, all pages are HMM_PFN_VALID or
> +	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
> +	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
> +	 */
> +	for (i = 0; pages && i < npages; i++)
> +		pages[i] = hmm_pfn_to_page(pfns[i]);
> +
> +	*phmm_range = hmm_range;
> +
> +	return 0;
> +
> +out_free_pfns:
> +	kvfree(pfns);
> +out_free_range:
> +	kfree(hmm_range);
> +
> +	return r;
> +}
> +
> +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
> +{
> +	int r;
> +
> +	r = mmu_interval_read_retry(hmm_range->notifier,
> +				    hmm_range->notifier_seq);
> +	kvfree(hmm_range->hmm_pfns);
> +	kfree(hmm_range);
> +
> +	return r;
> +}
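
As an aside for readers following the series, here is a minimal sketch of how
a caller (for example the upcoming SVM code) might pair the two new helpers.
The function name below is illustrative only and not part of this patch:

	/* Illustrative only: hypothetical caller of the new common helpers. */
	static int example_get_user_pages(struct mmu_interval_notifier *notifier,
					  struct mm_struct *mm, uint64_t start,
					  uint64_t npages, struct page **pages)
	{
		struct hmm_range *range;
		int r;

		/* readonly=false, mmap_locked=false: writable mapping, the
		 * helper takes mmap_read_lock() itself. */
		r = amdgpu_hmm_range_get_pages(notifier, mm, pages, start,
					       npages, &range, false, false);
		if (r)
			return r;

		/*
		 * ... use the pages under the driver's notifier lock ...
		 *
		 * Then validate that no invalidation raced with the fault and
		 * free the range; a non-zero return means the caller must
		 * retry.
		 */
		return amdgpu_hmm_range_get_pages_done(range);
	}
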
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
> index a292238f75eb..7f7d37a457c3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
> @@ -30,6 +30,13 @@
>   #include <linux/workqueue.h>
>   #include <linux/interval_tree.h>
>   
> +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
> +			       struct mm_struct *mm, struct page **pages,
> +			       uint64_t start, uint64_t npages,
> +			       struct hmm_range **phmm_range, bool readonly,
> +			       bool mmap_locked);
> +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
> +
>   #if defined(CONFIG_HMM_MIRROR)
>   int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
>   void amdgpu_mn_unregister(struct amdgpu_bo *bo);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index aaad9e304ad9..f423f42cb9b5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -32,7 +32,6 @@
>   
>   #include <linux/dma-mapping.h>
>   #include <linux/iommu.h>
> -#include <linux/hmm.h>
>   #include <linux/pagemap.h>
>   #include <linux/sched/task.h>
>   #include <linux/sched/mm.h>
> @@ -843,10 +842,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
>   	struct amdgpu_ttm_tt *gtt = (void *)ttm;
>   	unsigned long start = gtt->userptr;
>   	struct vm_area_struct *vma;
> -	struct hmm_range *range;
> -	unsigned long timeout;
>   	struct mm_struct *mm;
> -	unsigned long i;
> +	bool readonly;
>   	int r = 0;
>   
>   	mm = bo->notifier.mm;
> @@ -862,76 +859,26 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
>   	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
>   		return -ESRCH;
>   
> -	range = kzalloc(sizeof(*range), GFP_KERNEL);
> -	if (unlikely(!range)) {
> -		r = -ENOMEM;
> -		goto out;
> -	}
> -	range->notifier = &bo->notifier;
> -	range->start = bo->notifier.interval_tree.start;
> -	range->end = bo->notifier.interval_tree.last + 1;
> -	range->default_flags = HMM_PFN_REQ_FAULT;
> -	if (!amdgpu_ttm_tt_is_readonly(ttm))
> -		range->default_flags |= HMM_PFN_REQ_WRITE;
> -
> -	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
> -					 sizeof(*range->hmm_pfns), GFP_KERNEL);
> -	if (unlikely(!range->hmm_pfns)) {
> -		r = -ENOMEM;
> -		goto out_free_ranges;
> -	}
> -
>   	mmap_read_lock(mm);
>   	vma = find_vma(mm, start);
> +	mmap_read_unlock(mm);
>   	if (unlikely(!vma || start < vma->vm_start)) {
>   		r = -EFAULT;
> -		goto out_unlock;
> +		goto out_putmm;
>   	}
>   	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
>   		vma->vm_file)) {
>   		r = -EPERM;
> -		goto out_unlock;
> +		goto out_putmm;
>   	}
> -	mmap_read_unlock(mm);
> -	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
> -
> -retry:
> -	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
>   
> -	mmap_read_lock(mm);
> -	r = hmm_range_fault(range);
> -	mmap_read_unlock(mm);
> -	if (unlikely(r)) {
> -		/*
> -		 * FIXME: This timeout should encompass the retry from
> -		 * mmu_interval_read_retry() as well.
> -		 */
> -		if (r == -EBUSY && !time_after(jiffies, timeout))
> -			goto retry;
> -		goto out_free_pfns;
> -	}
> -
> -	/*
> -	 * Due to default_flags, all pages are HMM_PFN_VALID or
> -	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
> -	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
> -	 */
> -	for (i = 0; i < ttm->num_pages; i++)
> -		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
> -
> -	gtt->range = range;
> +	readonly = amdgpu_ttm_tt_is_readonly(ttm);
> +	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
> +				       ttm->num_pages, &gtt->range, readonly,
> +				       false);
> +out_putmm:
>   	mmput(mm);
>   
> -	return 0;
> -
> -out_unlock:
> -	mmap_read_unlock(mm);
> -out_free_pfns:
> -	kvfree(range->hmm_pfns);
> -out_free_ranges:
> -	kfree(range);
> -out:
> -	mmput(mm);
>   	return r;
>   }
>   
> @@ -960,10 +907,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
>   		 * FIXME: Must always hold notifier_lock for this, and must
>   		 * not ignore the return code.
>   		 */
> -		r = mmu_interval_read_retry(gtt->range->notifier,
> -					 gtt->range->notifier_seq);
> -		kvfree(gtt->range->hmm_pfns);
> -		kfree(gtt->range);
> +		r = amdgpu_hmm_range_get_pages_done(gtt->range);
>   		gtt->range = NULL;
>   	}
>   
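
One further usage note on the new mmap_locked parameter, as I read it from the
helper: a caller that already holds mmap_read_lock across its own VMA checks
would pass true so the helper does not take the lock again. Roughly, and again
purely illustrative rather than part of this patch:

	/* Illustrative only: caller keeps the mmap lock held across the fault. */
	static int example_get_pages_locked(struct mmu_interval_notifier *notifier,
					    struct mm_struct *mm, uint64_t start,
					    uint64_t npages, struct page **pages,
					    struct hmm_range **range)
	{
		int r;

		mmap_read_lock(mm);
		/* ... caller-side find_vma()/permission checks go here ... */
		r = amdgpu_hmm_range_get_pages(notifier, mm, pages, start,
					       npages, range, false,
					       true /* mmap_locked */);
		mmap_read_unlock(mm);

		return r;
	}
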


