[PATCH 2/2] drm/radeon: cleanup the fence ring locking code

Christian König <deathsimple@vodafone.de>
Tue Feb 18 08:52:30 PST 2014


From: Christian König <christian.koenig@amd.com>

We no longer need to take the ring lock while checking for a
GPU lockup, so just clean up the code.
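
For callers this means the explicit lock/unlock dance around the wait
functions goes away. In radeon_suspend_kms(), for example, the wait
loop shrinks from

    mutex_lock(&rdev->ring_lock);
    for (i = 0; i < RADEON_NUM_RINGS; i++) {
        r = radeon_fence_wait_empty_locked(rdev, i);
        ...
    }
    mutex_unlock(&rdev->ring_lock);

to just

    for (i = 0; i < RADEON_NUM_RINGS; i++) {
        r = radeon_fence_wait_empty(rdev, i);
        ...
    }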

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon.h           |  5 ++-
 drivers/gpu/drm/radeon/radeon_device.c    |  4 +--
 drivers/gpu/drm/radeon/radeon_fence.c     | 59 ++++++-------------------------
 drivers/gpu/drm/radeon/radeon_pm.c        |  4 +--
 drivers/gpu/drm/radeon/radeon_ring.c      |  2 +-
 drivers/gpu/drm/radeon/radeon_semaphore.c | 12 +++++--
 6 files changed, 25 insertions(+), 61 deletions(-)
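
Note: with radeon_fence_wait_locked() gone, the manual wait fallbacks
in radeon_semaphore_sync_rings() switch to the regular
radeon_fence_wait() and now propagate wait errors to the caller
instead of ignoring them:

    r = radeon_fence_wait(fence, false);
    if (r)
        return r;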

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9269413..7337b35 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -360,9 +360,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_locked(struct radeon_fence *fence);
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
 			  struct radeon_fence **fences,
 			  bool intr);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b012cbb..fa7841b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1445,10 +1445,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	/* evict vram memory */
 	radeon_bo_evict_vram(rdev);
 
-	mutex_lock(&rdev->ring_lock);
 	/* wait for gpu to finish processing current batch */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
-		r = radeon_fence_wait_empty_locked(rdev, i);
+		r = radeon_fence_wait_empty(rdev, i);
 		if (r) {
 			/* delay GPU reset to resume */
 			force_completion = true;
@@ -1457,7 +1456,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	if (force_completion) {
 		radeon_fence_driver_force_completion(rdev);
 	}
-	mutex_unlock(&rdev->ring_lock);
 
 	radeon_save_bios_scratch_regs(rdev);
 
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index c37cb79..a77b1c1 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * @rdev: radeon device pointer
  * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
- * @lock_ring: whether the ring should be locked or not
  *
  * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
 static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
-				 bool intr, bool lock_ring)
+				 bool intr)
 {
 	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
@@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 			if (i != RADEON_NUM_RINGS)
 				continue;
 
-			if (lock_ring)
-				mutex_lock(&rdev->ring_lock);
-
 			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 				if (!target_seq[i])
 					continue;
@@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 
 				/* remember that we need a reset */
 				rdev->needs_reset = true;
-				if (lock_ring)
-					mutex_unlock(&rdev->ring_lock);
 				wake_up_all(&rdev->fence_queue);
 				return -EDEADLK;
 			}
-
-			if (lock_ring)
-				mutex_unlock(&rdev->ring_lock);
 		}
 	}
 	return 0;
@@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
 		return 0;
 
-	r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
 	if (r)
 		return r;
 
@@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	if (num_rings == 0)
 		return -ENOENT;
 
-	r = radeon_fence_wait_seq(rdev, seq, intr, true);
+	r = radeon_fence_wait_seq(rdev, seq, intr);
 	if (r) {
 		return r;
 	}
@@ -472,37 +463,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 }
 
 /**
- * radeon_fence_wait_locked - wait for a fence to signal
- *
- * @fence: radeon fence object
- *
- * Wait for the requested fence to signal (all asics).
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int radeon_fence_wait_locked(struct radeon_fence *fence)
-{
-	uint64_t seq[RADEON_NUM_RINGS] = {};
-	int r;
-
-	if (fence == NULL) {
-		WARN(1, "Querying an invalid fence : %p !\n", fence);
-		return -EINVAL;
-	}
-
-	seq[fence->ring] = fence->seq;
-	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
-		return 0;
-
-	r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
-	if (r)
-		return r;
-
-	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
-	return 0;
-}
-
-/**
- * radeon_fence_wait_next_locked - wait for the next fence to signal
+ * radeon_fence_wait_next - wait for the next fence to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence)
  * Returns 0 if the next fence has passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 
@@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 		   already the last emitted fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, false, false);
+	return radeon_fence_wait_seq(rdev, seq, false);
 }
 
 /**
- * radeon_fence_wait_empty_locked - wait for all fences to signal
+ * radeon_fence_wait_empty - wait for all fences to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
@@ -543,7 +504,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 	if (!seq[ring])
 		return 0;
 
-	r = radeon_fence_wait_seq(rdev, seq, false, false);
+	r = radeon_fence_wait_seq(rdev, seq, false);
 	if (r) {
 		if (r == -EDEADLK)
 			return -EDEADLK;
@@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		r = radeon_fence_wait_empty_locked(rdev, ring);
+		r = radeon_fence_wait_empty(rdev, ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
 			radeon_fence_driver_force_completion(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4ad9af9..0119af4 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -260,7 +260,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 		if (!ring->ready) {
 			continue;
 		}
-		r = radeon_fence_wait_empty_locked(rdev, i);
+		r = radeon_fence_wait_empty(rdev, i);
 		if (r) {
 			/* needs a GPU reset, don't reset here */
 			mutex_unlock(&rdev->ring_lock);
@@ -896,7 +896,7 @@ force:
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
 		struct radeon_ring *ring = &rdev->ring[i];
 		if (ring->ready)
-			radeon_fence_wait_empty_locked(rdev, i);
+			radeon_fence_wait_empty(rdev, i);
 	}
 
 	/* program the new power state */
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index ae72108..767b5e7 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -382,7 +382,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 		if (ndw < ring->ring_free_dw) {
 			break;
 		}
-		r = radeon_fence_wait_next_locked(rdev, ring->idx);
+		r = radeon_fence_wait_next(rdev, ring->idx);
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 9006b32..6140af6 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -147,7 +147,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 
 		if (++count > RADEON_NUM_SYNCS) {
 			/* not enough room, wait manually */
-			radeon_fence_wait_locked(fence);
+			r = radeon_fence_wait(fence, false);
+			if (r)
+				return r;
 			continue;
 		}
 
@@ -161,7 +163,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
 			/* signaling wasn't successful wait manually */
 			radeon_ring_undo(&rdev->ring[i]);
-			radeon_fence_wait_locked(fence);
+			r = radeon_fence_wait(fence, false);
+			if (r)
+				return r;
 			continue;
 		}
 
@@ -169,7 +173,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
 			/* waiting wasn't successful wait manually */
 			radeon_ring_undo(&rdev->ring[i]);
-			radeon_fence_wait_locked(fence);
+			r = radeon_fence_wait(fence, false);
+			if (r)
+				return r;
 			continue;
 		}
 
-- 
1.8.3.2
