[PATCH 24/35] drm/amdkfd: page table restore through svm API

Felix Kuehling Felix.Kuehling at amd.com
Thu Jan 7 03:01:16 UTC 2021


From: Alex Sierra <alex.sierra at amd.com>

Implement page table restore through the SVM API. It is called from
the GPU page fault handler in amdgpu_vm to update page tables in
response to retry faults delivered through the IH.
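
As an illustration only (not part of this patch), the retry-fault path in
amdgpu_vm could hand the faulting address over to KFD roughly like the sketch
below; the wrapper name and return convention are assumptions, only
svm_range_restore_pages() is defined by this patch:

	/* Hypothetical caller-side sketch for the amdgpu_vm retry fault path.
	 * Everything except svm_range_restore_pages() is illustrative.
	 */
	static bool example_handle_retry_fault(struct amdgpu_device *adev,
					       unsigned int pasid, uint64_t addr)
	{
		/* Ask KFD SVM to validate the range and update the GPU page tables */
		int r = svm_range_restore_pages(adev, pasid, addr);

		/* A zero return means the fault was resolved and the access can be retried */
		return r == 0;
	}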

Signed-off-by: Alex Sierra <alex.sierra at amd.com>
Signed-off-by: Philip Yang <Philip.Yang at amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 78 ++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h |  2 +
 2 files changed, 80 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index ea27c5ed4ef3..7346255f7c27 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1629,6 +1629,84 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr)
 	return container_of(node, struct svm_range, it_node);
 }
 
+int
+svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+			uint64_t addr)
+{
+	int r = 0;
+	int srcu_idx;
+	struct mm_struct *mm = NULL;
+	struct svm_range *prange;
+	struct svm_range_list *svms;
+	struct kfd_process *p;
+
+	p = kfd_lookup_process_by_pasid(pasid);
+	if (!p) {
+		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
+		return -ESRCH;
+	}
+	svms = &p->svms;
+	srcu_idx = srcu_read_lock(&svms->srcu);
+
+	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
+
+	svms_lock(svms);
+	prange = svm_range_from_addr(svms, addr);
+	svms_unlock(svms);
+	if (!prange) {
+		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
+			 svms, addr);
+		r = -EFAULT;
+		goto unlock_out;
+	}
+
+	if (!atomic_read(&prange->invalid)) {
+		pr_debug("svms 0x%p [0x%lx 0x%lx] already restored\n",
+			 svms, prange->it_node.start, prange->it_node.last);
+		goto unlock_out;
+	}
+
+	mm = get_task_mm(p->lead_thread);
+	if (!mm) {
+		pr_debug("svms 0x%p failed to get mm\n", svms);
+		r = -ESRCH;
+		goto unlock_out;
+	}
+
+	mmap_read_lock(mm);
+
+	/*
+	 * If the range is migrating, wait until the migration is done.
+	 */
+	mutex_lock(&prange->mutex);
+
+	r = svm_range_validate(mm, prange);
+	if (r) {
+		pr_debug("failed %d to validate svms 0x%p [0x%lx 0x%lx]\n", r,
+			 svms, prange->it_node.start, prange->it_node.last);
+
+		goto mmput_out;
+	}
+
+	pr_debug("restoring svms 0x%p [0x%lx 0x%lx] mapping\n",
+		 svms, prange->it_node.start, prange->it_node.last);
+
+	r = svm_range_map_to_gpus(prange, true);
+	if (r)
+		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpu\n", r,
+			 svms, prange->it_node.start, prange->it_node.last);
+
+mmput_out:
+	mutex_unlock(&prange->mutex);
+	mmap_read_unlock(mm);
+	mmput(mm);
+unlock_out:
+	srcu_read_unlock(&svms->srcu, srcu_idx);
+	kfd_unref_process(p);
+
+	return r;
+}
+
 void svm_range_list_fini(struct kfd_process *p)
 {
 	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index c67e96f764fe..e546f36ef709 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -121,5 +121,7 @@ int svm_range_vram_node_new(struct amdgpu_device *adev,
 void svm_range_vram_node_free(struct svm_range *prange);
 int svm_range_split_by_granularity(struct kfd_process *p, unsigned long addr,
 				   struct list_head *list);
+int svm_range_restore_pages(struct amdgpu_device *adev,
+			    unsigned int pasid, uint64_t addr);
 
 #endif /* KFD_SVM_H_ */
-- 
2.29.2


