[PATCH 4/4] drm/amdgpu: cleanup amdgpu_hmm_range_get_pages
Felix Kuehling
felix.kuehling at amd.com
Thu Nov 10 22:00:36 UTC 2022
On 2022-11-10 at 08:00, Christian König wrote:
> Remove unused parameters and clean up dead code.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
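
For reference, a minimal caller-side sketch of the simplified interface (the helper name and its parameters below are hypothetical, not part of the patch): with the mm and mmap_locked parameters gone, the caller is expected to take the mmap read lock around the call itself, as amdgpu_ttm_tt_get_user_pages() and svm_range_validate_and_map() already do.

    /* Hypothetical caller-side sketch; not part of this patch. */
    static int example_get_range_pages(struct mmu_interval_notifier *notifier,
                                       struct mm_struct *mm, uint64_t start,
                                       uint64_t npages, bool readonly,
                                       struct page **pages,
                                       struct hmm_range **range)
    {
            int r;

            /* Holding the mmap read lock is now the caller's responsibility. */
            mmap_read_lock(mm);
            r = amdgpu_hmm_range_get_pages(notifier, start, npages, readonly,
                                           NULL /* owner */, pages, range);
            mmap_read_unlock(mm);

            return r;
    }
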
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 14 +++-----------
> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 7 +++----
> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 ++---
> drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 +++---
> 4 files changed, 11 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
> index a68072f766c7..a48ea62b12b0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
> @@ -158,10 +158,9 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
> }
>
> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
> - struct mm_struct *mm, struct page **pages,
> - uint64_t start, uint64_t npages,
> - struct hmm_range **phmm_range, bool readonly,
> - bool mmap_locked, void *owner)
> + uint64_t start, uint64_t npages, bool readonly,
> + void *owner, struct page **pages,
> + struct hmm_range **phmm_range)
> {
> struct hmm_range *hmm_range;
> unsigned long timeout;
> @@ -194,14 +193,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
>
> retry:
> hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
> -
> - if (likely(!mmap_locked))
> - mmap_read_lock(mm);
> -
> r = hmm_range_fault(hmm_range);
> -
> - if (likely(!mmap_locked))
> - mmap_read_unlock(mm);
> if (unlikely(r)) {
> /*
> * FIXME: This timeout should encompass the retry from
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
> index 4e596a16d288..13ed94d3b01b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
> @@ -31,10 +31,9 @@
> #include <linux/interval_tree.h>
>
> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
> - struct mm_struct *mm, struct page **pages,
> - uint64_t start, uint64_t npages,
> - struct hmm_range **phmm_range, bool readonly,
> - bool mmap_locked, void *owner);
> + uint64_t start, uint64_t npages, bool readonly,
> + void *owner, struct page **pages,
> + struct hmm_range **phmm_range);
> int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
>
> #if defined(CONFIG_HMM_MIRROR)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 0668b09d7394..804eb04cac2c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -688,9 +688,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
> }
>
> readonly = amdgpu_ttm_tt_is_readonly(ttm);
> - r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
> - ttm->num_pages, range, readonly,
> - true, NULL);
> + r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
> + readonly, NULL, pages, range);
> out_unlock:
> mmap_read_unlock(mm);
> if (r)
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index d069e6c096b6..2dc3b04064bd 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -1596,9 +1596,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
> next = min(vma->vm_end, end);
> npages = (next - addr) >> PAGE_SHIFT;
> WRITE_ONCE(p->svms.faulting_task, current);
> - r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
> - addr, npages, &hmm_range,
> - readonly, true, owner);
> + r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
> + readonly, owner, NULL,
> + &hmm_range);
> WRITE_ONCE(p->svms.faulting_task, NULL);
> if (r) {
> pr_debug("failed %d to get svm range pages\n", r);