[PATCH 02/16] drm/radeon: remove radeon_ring_index()

alexdeucher at gmail.com
Tue Jul 17 11:02:30 PDT 2012


From: Alex Deucher <alexander.deucher at amd.com>

Just store the ring index in the ring structure instead of looking it
up every time. Idea taken from one of Jerome's WIP rptr patches.

Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
---
 drivers/gpu/drm/radeon/r600.c          |   11 ++++-----
 drivers/gpu/drm/radeon/radeon.h        |    2 +-
 drivers/gpu/drm/radeon/radeon_device.c |    4 +++
 drivers/gpu/drm/radeon/radeon_ring.c   |   20 +----------------
 drivers/gpu/drm/radeon/radeon_test.c   |   35 +++++++++++++------------------
 5 files changed, 27 insertions(+), 45 deletions(-)
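
For anyone skimming the diff: the change replaces the pointer-comparison
lookup that radeon_ring_index() did on every call with an idx field that
is filled in once at device init. A minimal standalone sketch of the
pattern, with simplified illustrative types (struct ring, struct device,
NUM_RINGS are placeholders, not the actual radeon structures):

#include <stdio.h>

#define NUM_RINGS 3

struct ring {
	unsigned idx;	/* index of this ring within the device's ring array */
	/* rptr/wptr etc. elided */
};

struct device {
	struct ring ring[NUM_RINGS];
};

static void device_init(struct device *dev)
{
	unsigned i;

	/* set up ring ids once, instead of recomputing them per call */
	for (i = 0; i < NUM_RINGS; i++)
		dev->ring[i].idx = i;
}

/* callers now just read ring->idx instead of calling a lookup helper */
static void ring_test(struct device *dev, struct ring *ring)
{
	printf("ring test on %u\n", ring->idx);
}

int main(void)
{
	struct device dev;

	device_init(&dev);
	ring_test(&dev, &dev.ring[2]);
	return 0;
}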

diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c2e5069..9f24a80 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2198,7 +2198,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	uint32_t scratch;
 	uint32_t tmp = 0;
-	unsigned i, ridx = radeon_ring_index(rdev, ring);
+	unsigned i;
 	int r;
 
 	r = radeon_scratch_get(rdev, &scratch);
@@ -2209,7 +2209,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	WREG32(scratch, 0xCAFEDEAD);
 	r = radeon_ring_lock(rdev, ring, 3);
 	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
@@ -2224,10 +2224,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 	} else {
 		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ridx, scratch, tmp);
+			  ring->idx, scratch, tmp);
 		r = -EINVAL;
 	}
 	radeon_scratch_free(rdev, scratch);
@@ -2602,7 +2602,6 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	uint32_t tmp = 0;
 	unsigned i;
 	int r;
-	int ring_index = radeon_ring_index(rdev, ring);
 
 	r = radeon_scratch_get(rdev, &scratch);
 	if (r) {
@@ -2610,7 +2609,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, ring_index, &ib, 256);
+	r = radeon_ib_get(rdev, ring->idx, &ib, 256);
 	if (r) {
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2d7f06c..be45472 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -638,6 +638,7 @@ struct radeon_ring {
 	u32			ptr_reg_shift;
 	u32			ptr_reg_mask;
 	u32			nop;
+	u32			idx;
 };
 
 /*
@@ -757,7 +758,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_ring_tests(struct radeon_device *rdev);
 /* Ring access between begin & end cannot sleep */
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0302a9f..6c534f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -721,6 +721,10 @@ int radeon_device_init(struct radeon_device *rdev,
 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 	rdev->accel_working = false;
+	/* set up ring ids */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		rdev->ring[i].idx = i;
+	}
 
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
 		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index c48c354..c5828b9 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -207,21 +207,6 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	/* r1xx-r5xx only has CP ring */
-	if (rdev->family < CHIP_R600)
-		return RADEON_RING_TYPE_GFX_INDEX;
-
-	if (rdev->family >= CHIP_CAYMAN) {
-		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
-			return CAYMAN_RING_TYPE_CP1_INDEX;
-		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
-			return CAYMAN_RING_TYPE_CP2_INDEX;
-	}
-	return RADEON_RING_TYPE_GFX_INDEX;
-}
-
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 rptr;
@@ -253,7 +238,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 		if (ndw < ring->ring_free_dw) {
 			break;
 		}
-		r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
+		r = radeon_fence_wait_next_locked(rdev, ring->idx);
 		if (r)
 			return r;
 	}
@@ -382,7 +367,6 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
 			    uint32_t **data)
 {
 	unsigned size, ptr, i;
-	int ridx = radeon_ring_index(rdev, ring);
 
 	/* just in case lock the ring */
 	mutex_lock(&rdev->ring_lock);
@@ -394,7 +378,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
 	}
 
 	/* it doesn't make sense to save anything if all fences are signaled */
-	if (!radeon_fence_count_emitted(rdev, ridx)) {
+	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
 		mutex_unlock(&rdev->ring_lock);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index a94f66f..7c16540 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -229,8 +229,6 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 {
 	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
 	struct radeon_semaphore *semaphore = NULL;
-	int ridxA = radeon_ring_index(rdev, ringA);
-	int ridxB = radeon_ring_index(rdev, ringB);
 	int r;
 
 	r = radeon_semaphore_create(rdev, &semaphore);
@@ -241,18 +239,18 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 
 	r = radeon_ring_lock(rdev, ringA, 64);
 	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
-	r = radeon_fence_emit(rdev, &fence1, ridxA);
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fence1, ringA->idx);
 	if (r) {
 		DRM_ERROR("Failed to emit fence 1\n");
 		radeon_ring_unlock_undo(rdev, ringA);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
-	r = radeon_fence_emit(rdev, &fence2, ridxA);
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fence2, ringA->idx);
 	if (r) {
 		DRM_ERROR("Failed to emit fence 2\n");
 		radeon_ring_unlock_undo(rdev, ringA);
@@ -272,7 +270,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		DRM_ERROR("Failed to lock ring B %p\n", ringB);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
 	radeon_ring_unlock_commit(rdev, ringB);
 
 	r = radeon_fence_wait(fence1, false);
@@ -293,7 +291,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 		DRM_ERROR("Failed to lock ring B %p\n", ringB);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
 	radeon_ring_unlock_commit(rdev, ringB);
 
 	r = radeon_fence_wait(fence2, false);
@@ -322,9 +320,6 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
 {
 	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
 	struct radeon_semaphore *semaphore = NULL;
-	int ridxA = radeon_ring_index(rdev, ringA);
-	int ridxB = radeon_ring_index(rdev, ringB);
-	int ridxC = radeon_ring_index(rdev, ringC);
 	bool sigA, sigB;
 	int i, r;
 
@@ -336,11 +331,11 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
 
 	r = radeon_ring_lock(rdev, ringA, 64);
 	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
-	r = radeon_fence_emit(rdev, &fenceA, ridxA);
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
 	if (r) {
 		DRM_ERROR("Failed to emit sync fence 1\n");
 		radeon_ring_unlock_undo(rdev, ringA);
@@ -350,11 +345,11 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
 
 	r = radeon_ring_lock(rdev, ringB, 64);
 	if (r) {
-		DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
-	r = radeon_fence_emit(rdev, &fenceB, ridxB);
+	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
+	r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
 	if (r) {
 		DRM_ERROR("Failed to create sync fence 2\n");
 		radeon_ring_unlock_undo(rdev, ringB);
@@ -378,7 +373,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
 		DRM_ERROR("Failed to lock ring B %p\n", ringC);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
 	radeon_ring_unlock_commit(rdev, ringC);
 
 	for (i = 0; i < 30; ++i) {
@@ -404,7 +399,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
 		DRM_ERROR("Failed to lock ring B %p\n", ringC);
 		goto out_cleanup;
 	}
-	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
 	radeon_ring_unlock_commit(rdev, ringC);
 
 	mdelay(1000);
-- 
1.7.7.5