[PATCH] drm/amdkfd: change kfd/svm page fault drain handling

Xiaogang.Chen xiaogang.chen at amd.com
Mon Jul 15 20:19:35 UTC 2024


From: Xiaogang Chen <xiaogang.chen at amd.com>

When an app unmaps VM ranges (munmap), kfd/svm starts draining pending page
faults and does not handle any incoming page faults for this process until a
deferred work item gets executed by the default system workqueue. The length
of this "no page fault handling" window is unpredictable, which hurts kfd
performance on page fault recovery.

This patch drains pending page faults right before the GPU VM range is
unmapped from the app, reducing the window during which kfd does not handle
page faults.

Any page fault that hits an unmapped range after the pending page faults have
been drained is an app bug: the app accessed a VM range after unmapping it.
It is not the driver's job to cover that.
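
In short, the drain that used to run asynchronously in the deferred worker
now runs synchronously at unmap time. A condensed sketch of the new sequence
in svm_range_unmap_from_cpu (distilled from the diff below, not a literal
excerpt):

	/* Have the page fault handler drop faults arriving from now on. */
	atomic_set(&svms->drain_pagefaults, 1);
	/* Drain the retry faults already pending on each supported GPU. */
	svm_range_drain_retry_fault(svms);
	/* Resume normal page fault handling for the process. */
	atomic_set(&svms->drain_pagefaults, 0);

	/* ... the range is unmapped afterwards; any later fault on it is
	 * an "access after unmap" application bug, not driver work.
	 */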

Signed-off-by: Xiaogang Chen <xiaogang.chen at amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 36 ++++++++++------------------
 1 file changed, 13 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 407636a68814..83e694be143d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2263,16 +2263,10 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 {
 	struct kfd_process_device *pdd;
 	struct kfd_process *p;
-	int drain;
 	uint32_t i;
 
 	p = container_of(svms, struct kfd_process, svms);
 
-restart:
-	drain = atomic_read(&svms->drain_pagefaults);
-	if (!drain)
-		return;
-
 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
 		pdd = p->pdds[i];
 		if (!pdd)
@@ -2292,8 +2286,6 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 
 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
 	}
-	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
-		goto restart;
 }
 
 static void svm_range_deferred_list_work(struct work_struct *work)
@@ -2315,17 +2307,8 @@ static void svm_range_deferred_list_work(struct work_struct *work)
 			 prange->start, prange->last, prange->work_item.op);
 
 		mm = prange->work_item.mm;
-retry:
-		mmap_write_lock(mm);
 
-		/* Checking for the need to drain retry faults must be inside
-		 * mmap write lock to serialize with munmap notifiers.
-		 */
-		if (unlikely(atomic_read(&svms->drain_pagefaults))) {
-			mmap_write_unlock(mm);
-			svm_range_drain_retry_fault(svms);
-			goto retry;
-		}
+		mmap_write_lock(mm);
 
 		/* Remove from deferred_list must be inside mmap write lock, for
 		 * two race cases:
@@ -2455,11 +2438,17 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
 		 prange, prange->start, prange->last, start, last);
 
-	/* Make sure pending page faults are drained in the deferred worker
-	 * before the range is freed to avoid straggler interrupts on
-	 * unmapped memory causing "phantom faults".
+	/* Before unmapping pages from the GPU, drain pending page faults to
+	 * avoid straggler interrupts on soon-to-be-unmapped memory causing
+	 * "phantom faults". Set drain_pagefaults so the page fault handler
+	 * drops incoming page faults while svm_range_drain_retry_fault drains
+	 * the pending fault entries up to the current timestamp. Page faults
+	 * on these unmapped pages after this point are not faults the driver
+	 * needs to drop; they are an app bug: access after unmap.
 	 */
-	atomic_inc(&svms->drain_pagefaults);
+	atomic_set(&svms->drain_pagefaults, 1);
+	svm_range_drain_retry_fault(svms);
+	atomic_set(&svms->drain_pagefaults, 0);
 
 	unmap_parent = start <= prange->start && last >= prange->last;
 
@@ -3174,8 +3163,9 @@ void svm_range_list_fini(struct kfd_process *p)
 	 * Ensure no retry fault comes in afterwards, as page fault handler will
 	 * not find kfd process and take mm lock to recover fault.
 	 */
-	atomic_inc(&p->svms.drain_pagefaults);
+	atomic_set(&p->svms.drain_pagefaults, 1);
 	svm_range_drain_retry_fault(&p->svms);
+	atomic_set(&p->svms.drain_pagefaults, 0);
 
 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
 		svm_range_unlink(prange);
-- 
2.25.1


