[PATCH 11/18] drm/radeon: use one wait queue for all rings, add fence_wait_any

j.glisse at gmail.com
Fri May 4 15:43:52 PDT 2012


From: Jerome Glisse <jglisse at redhat.com>

Use one wait queue for all rings. When one ring makes progress, the
others likely do too, and we do not expect to have many waiters
anyway.
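
Roughly, the waiter side then becomes the following (a sketch of the
pattern used in the patch below; radeon_seq_signaled() is the per-ring
check already present in radeon_fence.c):

	/* All waiters sleep on the shared rdev->fence_queue; a wakeup
	 * caused by another ring only costs them a recheck of their own
	 * ring's sequence number. */
	r = wait_event_timeout(rdev->fence_queue,
			       radeon_seq_signaled(rdev, target_seq, ring),
			       timeout);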

Also add a fence_wait_any that waits until the first fence in the
fence array (one fence per ring) is signaled. This allows waiting
across all rings at once.
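
As a caller-side illustration only (gfx_fence and cp1_fence are
hypothetical fences the caller holds; the array is indexed by ring
and NULL entries are skipped):

	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL, };

	/* wait until whichever of these signals first */
	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
	fences[CAYMAN_RING_TYPE_CP1_INDEX] = cp1_fence;

	r = radeon_fence_wait_any(rdev, fences, true);
	if (r == -EDEADLK) {
		/* a lockup was detected on one of the waited rings */
	}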

Signed-off-by: Christian König <deathsimple at vodafone.de>
Signed-off-by: Jerome Glisse <jglisse at redhat.com>
---
 drivers/gpu/drm/radeon/radeon.h       |    5 +-
 drivers/gpu/drm/radeon/radeon_fence.c |  151 +++++++++++++++++++++++++++++++-
 2 files changed, 150 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c78f15b..6f8d0f5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -262,7 +262,6 @@ struct radeon_fence_driver {
 	uint64_t			seq;
 	atomic64_t			last_seq;
 	unsigned long			last_activity;
-	wait_queue_head_t		queue;
 	bool				initialized;
 };
 
@@ -286,6 +285,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
@@ -1516,6 +1518,7 @@ struct radeon_device {
 	struct radeon_scratch		scratch;
 	struct radeon_mman		mman;
 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
+	wait_queue_head_t		fence_queue;
 	struct radeon_semaphore_driver	semaphore_drv;
 	struct mutex			ring_lock;
 	struct radeon_ring		ring[RADEON_NUM_RINGS];
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 99c31b2..9c2c1b3 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -233,11 +233,11 @@ static int radeon_wait_seq(struct radeon_device *rdev, u64 target_seq,
 		trace_radeon_fence_wait_begin(rdev->ddev, seq);
 		radeon_irq_kms_sw_irq_get(rdev, ring);
 		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_drv[ring].queue,
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
 							     (signaled = radeon_seq_signaled(rdev, target_seq, ring)),
 							     timeout);
 		} else {
-			r = wait_event_timeout(rdev->fence_drv[ring].queue,
+			r = wait_event_timeout(rdev->fence_queue,
 					       (signaled = radeon_seq_signaled(rdev, target_seq, ring)),
 					       timeout);
 		}
@@ -304,6 +304,147 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	return 0;
 }
 
+static bool radeon_seq_any_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i, j;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		/* poll new last sequence at least once */
+		for (j = 0; j < 2; j++) {
+			if (atomic64_read(&rdev->fence_drv[i].last_seq) >= seq[i]) {
+				return true;
+			}
+			radeon_fence_poll(rdev, i);
+		}
+	}
+	return false;
+}
+
+static int radeon_wait_seq_any(struct radeon_device *rdev, u64 *target_seq,
+			       bool intr)
+{
+	unsigned long timeout, last_activity, tmp;
+	unsigned i, ring = 0;
+	bool signaled;
+	int r;
+
+	/* use the most recent one as indicator */
+	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+			last_activity = rdev->fence_drv[i].last_activity;
+		}
+		/* For lockup detection we just pick one of the rings we are
+		 * actively waiting for.
+		 */
+		if (target_seq[i]) {
+			ring = i;
+		}
+	}
+	while (!radeon_seq_any_signaled(rdev, target_seq)) {
+		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+			 * either way, just wait for the minimum amount and then check for a lockup
+			 */
+			timeout = 1;
+		}
+
+		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_get(rdev, i);
+			}
+		}
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
+							     (signaled = radeon_seq_any_signaled(rdev, target_seq)),
+							     timeout);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue,
+					       (signaled = radeon_seq_any_signaled(rdev, target_seq)),
+					       timeout);
+		}
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_put(rdev, i);
+			}
+		}
+		if (unlikely(r < 0)) {
+			return r;
+		}
+		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+		if (unlikely(!signaled)) {
+			/* we were interrupted for some reason and the fence
+			 * isn't signaled yet, so resume waiting */
+			if (r) {
+				continue;
+			}
+
+			mutex_lock(&rdev->ring_lock);
+			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+					tmp = rdev->fence_drv[i].last_activity;
+				}
+			}
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != tmp) {
+				last_activity = tmp;
+				mutex_unlock(&rdev->ring_lock);
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+					 target_seq[ring]);
+
+				/* change last_activity so nobody else thinks there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				mutex_unlock(&rdev->ring_lock);
+				return -EDEADLK;
+			}
+			mutex_unlock(&rdev->ring_lock);
+		}
+	}
+	return 0;
+}
+
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr)
+{
+	uint64_t seq[RADEON_NUM_RINGS];
+	unsigned i, c;
+	int r;
+
+	for (i = 0, c = 0; i < RADEON_NUM_RINGS; ++i) {
+		seq[i] = 0;
+		if (fences[i] && fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+			seq[i] = fences[i]->seq;
+			c++;
+		}
+	}
+	if (!c) {
+		/* nothing to wait for */
+		return 0;
+	}
+
+	r = radeon_wait_seq_any(rdev, seq, intr);
+	if (r) {
+		return r;
+	}
+	return 0;
+}
+
 int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq;
@@ -352,7 +493,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
 
 	wake = radeon_fence_poll(rdev, ring);
 	if (wake) {
-		wake_up_all(&rdev->fence_drv[ring].queue);
+		wake_up_all(&rdev->fence_queue);
 	}
 }
 
@@ -409,7 +550,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 	rdev->fence_drv[ring].seq = 1;
 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
 	rdev->fence_drv[ring].last_activity = jiffies;
-	init_waitqueue_head(&rdev->fence_drv[ring].queue);
 	rdev->fence_drv[ring].initialized = false;
 }
 
@@ -417,6 +557,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 {
 	int ring;
 
+	init_waitqueue_head(&rdev->fence_queue);
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		radeon_fence_driver_init_ring(rdev, ring);
 	}
@@ -434,7 +575,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
 		radeon_fence_wait_empty(rdev, ring);
-		wake_up_all(&rdev->fence_drv[ring].queue);
+		wake_up_all(&rdev->fence_queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 		rdev->fence_drv[ring].initialized = false;
 	}
-- 
1.7.7.6


