[PATCH v4 2/7] drm/amdkfd: Add helper functions to align range start and last

Philip Yang <Philip.Yang at amd.com>
Mon Jan 15 22:49:45 UTC 2024


Calculate the range start and last addresses aligned to the range
granularity size. This removes duplicated code, and the helper functions
will be used in a future patch to handle map and unmap to GPU based on
the range granularity. No functional change.

Signed-off-by: Philip Yang <Philip.Yang at amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |  4 ++--
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 10 ++++------
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h     | 10 ++++++++++
 3 files changed, 16 insertions(+), 8 deletions(-)
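
For reference, a minimal standalone sketch of what the two helpers compute
(hypothetical user-space code, not taken from the patch, assuming the usual
power-of-two ALIGN()/ALIGN_DOWN() semantics): they clamp the granularity-sized
granule containing addr to the [range_start, range_last] interval.

	#include <stdint.h>
	#include <stdio.h>

	/* Power-of-two alignment, mirroring the kernel macros. */
	#define ALIGN_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))
	#define ALIGN(x, a)       (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	/* Start of the granule containing addr, clamped to range_start. */
	static uint64_t svm_range_align_start(uint64_t addr, uint64_t range_start,
					      uint64_t align_size)
	{
		uint64_t start = ALIGN_DOWN(addr, align_size);

		return start > range_start ? start : range_start;
	}

	/* Last page of the granule containing addr, clamped to range_last. */
	static uint64_t svm_range_align_last(uint64_t addr, uint64_t range_last,
					     uint64_t align_size)
	{
		uint64_t last = ALIGN(addr + 1, align_size) - 1;

		return last < range_last ? last : range_last;
	}

	int main(void)
	{
		/* Example: granularity 9 -> 512-page granule, fault at page 0x12345 */
		uint64_t size = 1ULL << 9;
		uint64_t addr = 0x12345, range_start = 0x12000, range_last = 0x12fff;

		printf("start 0x%llx last 0x%llx\n",
		       (unsigned long long)svm_range_align_start(addr, range_start, size),
		       (unsigned long long)svm_range_align_last(addr, range_last, size));
		/* prints: start 0x12200 last 0x123ff */
		return 0;
	}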

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index dae05f70257b..64eb9023d66b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -986,8 +986,8 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 
 	/* Align migration range start and size to granularity size */
 	size = 1UL << prange->granularity;
-	start = max(ALIGN_DOWN(addr, size), prange->start);
-	last = min(ALIGN(addr + 1, size) - 1, prange->last);
+	start = svm_range_align_start(addr, prange->start, size);
+	last = svm_range_align_last(addr, prange->last, size);
 
 	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
 				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 14dbc0fd51a9..a2c96f5760ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2698,10 +2698,8 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 			 (vma->vm_start <= vma->vm_mm->start_stack &&
 			  vma->vm_end >= vma->vm_mm->start_stack);
 
-	start_limit = max(vma->vm_start >> PAGE_SHIFT,
-		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
-	end_limit = min(vma->vm_end >> PAGE_SHIFT,
-		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
+	start_limit = svm_range_align_start(addr, vma->vm_start >> PAGE_SHIFT, 2UL << 8);
+	end_limit = svm_range_align_last(addr, (vma->vm_end >> PAGE_SHIFT) - 1, 2UL << 8) + 1;
 	/* First range that starts after the fault address */
 	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
 	if (node) {
@@ -3043,8 +3041,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 
 	/* Align migration range start and size to granularity size */
 	size = 1UL << prange->granularity;
-	start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
-	last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
+	start = svm_range_align_start(addr, prange->start, size);
+	last = svm_range_align_last(addr, prange->last, size);
 	if (prange->actual_loc != 0 || best_loc != 0) {
 		migration = true;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 026863a0abcd..806bcac6d101 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -159,6 +159,16 @@ static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
 	return svm_bo;
 }
 
+static inline u64 svm_range_align_start(u64 addr, u64 range_start, u64 align_size)
+{
+	return max(ALIGN_DOWN(addr, align_size), range_start);
+}
+
+static inline u64 svm_range_align_last(u64 addr, u64 range_last, u64 align_size)
+{
+	return min(ALIGN(addr + 1, align_size) - 1, range_last);
+}
+
 int svm_range_list_init(struct kfd_process *p);
 void svm_range_list_fini(struct kfd_process *p);
 int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
-- 
2.35.1


