[PATCH v3 1/3] drm/radeon: take exclusive_lock in read mode during ring tests, v3

Maarten Lankhorst maarten.lankhorst at canonical.com
Wed Aug 20 06:20:11 PDT 2014


Hey,

I found another bug related to GPU reset, one that seems to trigger more reliably with the delayed work changes.

On 19-08-14 at 15:06, Christian König wrote:
> On 19.08.2014 at 14:22, Maarten Lankhorst wrote:
>> This is needed for the next commit, because the lockup detection
>> will need the read lock to run.
>>
>> Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>
>> ---
>> radeon_pm_compute_clocks already checks if dpm is enabled, so no need to check a second time.
>>
>> Because of locking and waiting, the radeon_pm_compute_clocks and resume_force_mode calls
>> have to be done with the read lock held.
>>
>> It seems to survive on my radeon when catting /sys/kernel/debug/dri/0/radeon_gpu_reset, although
>> UVD fails to reset, and that ring gets disabled as a result.
>
> Depending on what hardware you have, it's normal that UVD doesn't reset properly. I still haven't figured out the correct sequence in which I need to disable/enable the different UVD blocks on all hardware generations.
>
> It seems to work fine on my Cayman, but doesn't, for example, on Turks (which at least theoretically should have the same UVD block). It should be fine as long as the engines get properly disabled when the IB test fails after a reset.
>
> Another common source of reset instability is DPM; while it now seems to be stable on NI and BTC, I can't get a single reset to work once I use it.
>
> Regarding the patch, it looks good now, but I still want to test it a bit,
> Christian. 

It seems that if UVD fails, a second lockup recovery could in theory be
attempted, because the UVD ring stays locked up and the delayed work
doesn't check whether the ring is still ready.
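
For illustration, here is a rough sketch of the kind of check the delayed
work could make (not part of this patch; the helper name is made up, only
ring->ready and the behaviour of radeon_ib_ring_tests() are taken from the
driver):

/* Sketch only: skip rings that were already disabled after a failed IB
 * test, so a hung-but-disabled UVD ring cannot trigger a second lockup
 * recovery. radeon_ib_ring_tests() clears ring->ready on failure. */
static bool radeon_ring_worth_checking(struct radeon_device *rdev, int idx)
{
	struct radeon_ring *ring = &rdev->ring[idx];

	/* a !ready ring has had its fences force-completed already,
	 * so there is nothing left to recover */
	return ring->ready;
}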

The changes in "drm/radeon: handle lockup in delayed work, v3" expose
this bug reliably, because they force hangs to always be checked.

This results in repeated lockup recovery occurring.

Does this alternate patch look better?

I've also attached a patch to this mail that tests for lockups on all rings;
if any ring has saved ring data, the radeon_gpu_reset function will be
retried. A rough sketch of the idea follows below.
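
Something like this (hypothetical sketch, not the scrubbed attachment
itself; radeon_ring_test_lockup() and RADEON_NUM_RINGS exist in the driver,
the wrapper name is made up):

/* Sketch: report whether any ready ring is locked up; the reset path
 * below can then be retried (-EAGAIN) as long as saved ring data is
 * still available to restore. */
static bool radeon_any_ring_locked_up(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (ring->ready && radeon_ring_test_lockup(rdev, ring))
			return true;
	}
	return false;
}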

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b281886f6f51..eca6382f259f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -370,7 +370,7 @@ struct radeon_fence {
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6a219bcee66d..e13e408832e5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1395,9 +1395,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (r)
 		return r;
 
-	r = radeon_ib_ring_tests(rdev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
+	radeon_ib_ring_tests(rdev);
 
 	r = radeon_gem_debugfs_init(rdev);
 	if (r) {
@@ -1486,7 +1484,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
 	int i, r;
-	bool force_completion = false;
 
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
@@ -1530,12 +1527,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 		r = radeon_fence_wait_empty(rdev, i);
 		if (r) {
 			/* delay GPU reset to resume */
-			force_completion = true;
+			radeon_fence_driver_force_completion(rdev, i);
 		}
 	}
-	if (force_completion) {
-		radeon_fence_driver_force_completion(rdev);
-	}
 
 	radeon_save_bios_scratch_regs(rdev);
 
@@ -1595,9 +1589,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	radeon_agp_resume(rdev);
 	radeon_resume(rdev);
 
-	r = radeon_ib_ring_tests(rdev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
+	radeon_ib_ring_tests(rdev);
 
 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
 		/* do dpm late init */
@@ -1663,13 +1655,10 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	unsigned ring_sizes[RADEON_NUM_RINGS];
 	uint32_t *ring_data[RADEON_NUM_RINGS];
 
-	bool saved = false;
-
 	int i, r;
 	int resched;
 
 	down_write(&rdev->exclusive_lock);
-
 	if (!rdev->needs_reset) {
 		up_write(&rdev->exclusive_lock);
 		return 0;
@@ -1687,13 +1676,11 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
 						   &ring_data[i]);
 		if (ring_sizes[i]) {
-			saved = true;
 			dev_info(rdev->dev, "Saved %d dwords of commands "
 				 "on ring %d.\n", ring_sizes[i], i);
 		}
 	}
 
-retry:
 	r = radeon_asic_reset(rdev);
 	if (!r) {
 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
@@ -1702,34 +1689,19 @@ retry:
 
 	radeon_restore_bios_scratch_regs(rdev);
 
-	if (!r) {
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!r && ring_data[i]) {
 			radeon_ring_restore(rdev, &rdev->ring[i],
 					    ring_sizes[i], ring_data[i]);
-			ring_sizes[i] = 0;
-			ring_data[i] = NULL;
-		}
-
-		r = radeon_ib_ring_tests(rdev);
-		if (r) {
-			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
-			if (saved) {
-				saved = false;
-				radeon_suspend(rdev);
-				goto retry;
-			}
-		}
-	} else {
-		radeon_fence_driver_force_completion(rdev);
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		} else {
+			radeon_fence_driver_force_completion(rdev, i);
 			kfree(ring_data[i]);
 		}
 	}
 
 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
 		/* do dpm late init */
-		r = radeon_pm_late_init(rdev);
-		if (r) {
+		if (radeon_pm_late_init(rdev)) {
 			rdev->pm.dpm_enabled = false;
 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
 		}
@@ -1753,19 +1725,27 @@ retry:
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
 
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+	downgrade_write(&rdev->exclusive_lock);
+
 	drm_helper_resume_force_mode(rdev->ddev);
 
 	/* set the power state here in case we are a PX system or headless */
-	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+	if ((rdev->pm.pm_method == PM_METHOD_DPM))
 		radeon_pm_compute_clocks(rdev);
 
-	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-	if (r) {
+	if (!r) {
+		r = radeon_ib_ring_tests(rdev);
+		if (r && ring_data[RADEON_RING_TYPE_GFX_INDEX])
+			r = -EAGAIN;
+	}
+
+	if (r && r != -EAGAIN) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");
 	}
 
-	up_write(&rdev->exclusive_lock);
+	up_read(&rdev->exclusive_lock);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3fdf87318069..bd0d687379ee 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -405,7 +405,9 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		r = radeon_fence_wait(work->fence, false);
 		if (r == -EDEADLK) {
 			up_read(&rdev->exclusive_lock);
-			r = radeon_gpu_reset(rdev);
+			do {
+				r = radeon_gpu_reset(rdev);
+			} while (r == -EAGAIN);
 			down_read(&rdev->exclusive_lock);
 		}
 		if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 913787085dfa..f17dfea6465c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -758,7 +758,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 		r = radeon_fence_wait_empty(rdev, ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
-			radeon_fence_driver_force_completion(rdev);
+			radeon_fence_driver_force_completion(rdev, ring);
 		}
 		wake_up_all(&rdev->fence_queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
@@ -771,19 +771,15 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
  * radeon_fence_driver_force_completion - force all fence waiter to complete
  *
  * @rdev: radeon device pointer
+ * @ring: the ring to complete
  *
  * In case of GPU reset failure make sure no process keep waiting on fence
  * that will never complete.
  */
-void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
 {
-	int ring;
-
-	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
-		if (!rdev->fence_drv[ring].initialized)
-			continue;
+	if (rdev->fence_drv[ring].initialized)
 		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
-	}
 }
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 5bf2c0a05827..f2b2e2ee2f3f 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -271,6 +271,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
 		if (r) {
 			ring->ready = false;
 			rdev->needs_reset = false;
+			radeon_fence_driver_force_completion(rdev, i);
 
 			if (i == RADEON_RING_TYPE_GFX_INDEX) {
 				/* oh, oh, that's really bad */
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index d65607902537..3b4e69f26014 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -304,7 +304,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
 	mutex_lock(&rdev->ring_lock);
 	*data = NULL;
 
-	if (ring->ring_obj == NULL) {
+	if (ring->ring_obj == NULL || !ring->ready) {
 		mutex_unlock(&rdev->ring_lock);
 		return 0;
 	}

-------------- next part --------------
A non-text attachment was scrubbed...
Name: radeon-test-all-rings.diff
Type: text/x-patch
Size: 1118 bytes
Desc: not available
URL: <http://lists.freedesktop.org/archives/dri-devel/attachments/20140820/a78e10b9/attachment.bin>

