[PATCH 06/11] drm/amdgpu: use all pte rings to recover page table

Chunming Zhou <David1.Zhou@amd.com>
Tue Aug 2 07:51:37 UTC 2016
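
A full reset restores the page tables from their shadow copies, which
was previously done through the buffer funcs ring alone. Use every PTE
ring instead: block the KERNEL and NORMAL run queues of each
vm_pte_ring, unpark its scheduler thread, and record it in the
unpark_bits mask so that the job recovery loop further down does not
unpark it a second time.

The mask bookkeeping in isolation looks like the following stand-alone
sketch (not part of the patch; the ring indices, the ring count, and
the printf standing in for kthread_unpark are made up for
illustration):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t unpark_bits = 0;
          int pte_ring_idx[] = { 1, 2 };  /* hypothetical PTE ring indices */
          int num_rings = 8;              /* hypothetical total ring count */
          int i;

          /* recovery phase: unpark the PTE rings and remember them */
          for (i = 0; i < 2; i++)
                  unpark_bits |= 1u << pte_ring_idx[i];

          /* job recovery phase: skip the rings unparked above */
          for (i = 0; i < num_rings; i++)
                  if (!((unpark_bits >> i) & 0x1))
                          printf("unpark ring %d\n", i);
          return 0;
  }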


Change-Id: Ic74508ec9de0bf1c027313ce9574e6cb8ea9bb1d
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 34 ++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b470e5a..b7b4cf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2098,6 +2098,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	int i, r;
 	int resched;
 	bool need_full_reset;
+	u32 unpark_bits;
 
 	if (!amdgpu_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
@@ -2119,6 +2120,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 		amd_sched_hw_job_reset(&ring->sched);
 		amdgpu_ring_reset(ring);
 	}
+	unpark_bits = 0;
 	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 	amdgpu_fence_driver_force_completion(adev);
 	/* store modesetting */
@@ -2162,8 +2164,6 @@ retry:
 		amdgpu_atombios_scratch_regs_restore(adev);
 	}
 	if (!r) {
-		struct amdgpu_ring *buffer_ring = adev->mman.buffer_funcs_ring;
-
 		amdgpu_irq_gpu_reset_resume_helper(adev);
 		r = amdgpu_ib_ring_tests(adev);
 		if (r) {
@@ -2178,11 +2178,20 @@ retry:
 		 */
 		if (need_full_reset && !(adev->flags & AMD_IS_APU)) {
 			struct amdgpu_vm *vm, *tmp;
+			int i;
 
 			DRM_INFO("recover page table from shadow\n");
-			amd_sched_rq_block_entity(
-				&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
-			kthread_unpark(buffer_ring->sched.thread);
+			for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) {
+				struct amdgpu_ring *ring = adev->vm_manager.vm_pte_rings[i];
+
+				amd_sched_rq_block_entity(
+					&ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], true);
+				amd_sched_rq_block_entity(
+					&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
+				kthread_unpark(ring->sched.thread);
+				unpark_bits |= 1 << ring->idx;
+			}
+
 			spin_lock(&adev->vm_list_lock);
 			list_for_each_entry_safe(vm, tmp, &adev->vm_list, list) {
 				spin_unlock(&adev->vm_list_lock);
@@ -2190,8 +2199,15 @@ retry:
 				spin_lock(&adev->vm_list_lock);
 			}
 			spin_unlock(&adev->vm_list_lock);
-			amd_sched_rq_block_entity(
-				&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+
+			for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) {
+				struct amdgpu_ring *ring = adev->vm_manager.vm_pte_rings[i];
+
+				amd_sched_rq_block_entity(
+					&ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], false);
+				amd_sched_rq_block_entity(
+					&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+			}
 		}
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];
@@ -2199,9 +2215,9 @@ retry:
 				continue;
 
 			DRM_INFO("ring:%d recover jobs\n", ring->idx);
-			kthread_park(buffer_ring->sched.thread);
 			amd_sched_job_recovery(&ring->sched);
-			kthread_unpark(ring->sched.thread);
+			if (!((unpark_bits >> ring->idx) & 0x1))
+				kthread_unpark(ring->sched.thread);
 		}
 	} else {
 		dev_err(adev->dev, "asic resume failed (%d).\n", r);
-- 
1.9.1
