[PATCH 13/28] drm/amdgpu: add master/slave check in init phase

Alex Deucher alexander.deucher at amd.com
Mon Mar 27 19:35:47 UTC 2023


From: Le Ma <le.ma at amd.com>

Skip KCQ setup on the slave xccs, as there is no use case for it.

Signed-off-by: Le Ma <le.ma at amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 59 +++++++++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  |  5 +++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 10 +++--
 4 files changed, 50 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 7f5c60381103..c83fb4277233 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -489,16 +489,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
 		return -EINVAL;
 
 	spin_lock(&kiq->ring_lock);
-	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
-					adev->gfx.num_compute_rings)) {
-		spin_unlock(&adev->gfx.kiq[0].ring_lock);
-		return -ENOMEM;
-	}
+	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+						adev->gfx.num_compute_rings)) {
+			spin_unlock(&kiq->ring_lock);
+			return -ENOMEM;
+		}
 
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		j = i + xcc_id * adev->gfx.num_compute_rings;
-		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
-					   RESET_QUEUES, 0, 0);
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			j = i + xcc_id * adev->gfx.num_compute_rings;
+			kiq->pmf->kiq_unmap_queues(kiq_ring,
+						   &adev->gfx.compute_ring[i],
+						   RESET_QUEUES, 0, 0);
+		}
 	}
 
 	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
@@ -549,22 +552,26 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
 							kiq_ring->queue);
 	spin_lock(&kiq->ring_lock);
-	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
-					adev->gfx.num_compute_rings +
-					kiq->pmf->set_resources_size);
-	if (r) {
-		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-		spin_unlock(&adev->gfx.kiq[0].ring_lock);
-		return r;
-	}
+	/* No need to map kcq on the slave */
+	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+						adev->gfx.num_compute_rings +
+						kiq->pmf->set_resources_size);
+		if (r) {
+			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+			spin_unlock(&adev->gfx.kiq[0].ring_lock);
+			return r;
+		}
 
-	if (adev->enable_mes)
-		queue_mask = ~0ULL;
+		if (adev->enable_mes)
+			queue_mask = ~0ULL;
 
-	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		j = i + xcc_id * adev->gfx.num_compute_rings;
-		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
+		kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			j = i + xcc_id * adev->gfx.num_compute_rings;
+			kiq->pmf->kiq_map_queues(kiq_ring,
+						 &adev->gfx.compute_ring[i]);
+		}
 	}
 
 	r = amdgpu_ring_test_helper(kiq_ring);
@@ -1078,3 +1085,9 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
 		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
 	}
 }
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
+{
+	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
+			adev->gfx.num_xcc_per_xcp : 1));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 1f7010041431..4c5881a24f6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -462,4 +462,6 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id)
 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
 						struct amdgpu_iv_entry *entry);
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2b6fdcf3bf60..15fd61c202c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -407,6 +407,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		else
 			tmo = tmo_gfx;
 
+		/* skip ib test on the slave kcq */
+		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
+		    !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id))
+			continue;
+
 		r = amdgpu_ring_test_ib(ring, tmo);
 		if (!r) {
 			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 0e162189d860..b9b02fa15c7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1886,9 +1886,13 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 		if (r)
 			return r;
 
-		for (j = 0; j < adev->gfx.num_compute_rings; j++) {
-			ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings];
-			amdgpu_ring_test_helper(ring);
+		/* skip ring test on slave kcq */
+		if (amdgpu_gfx_is_master_xcc(adev, i)) {
+			for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+				ring = &adev->gfx.compute_ring[j +
+					i * adev->gfx.num_compute_rings];
+				amdgpu_ring_test_helper(ring);
+			}
 		}
 
 		gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i);
-- 
2.39.2
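
For reference, a standalone sketch of the predicate this patch introduces and of the
per-XCC ring indexing used in the loops above. The struct and main() harness below are
illustrative stand-ins, not the kernel's struct amdgpu_device; only the arithmetic
mirrors amdgpu_gfx_is_master_xcc() and the j = i + xcc_id * num_compute_rings index
computed in the hunks.

	/* Standalone sketch (not kernel code). The struct below is a
	 * hypothetical stand-in for the relevant fields of struct
	 * amdgpu_device; the arithmetic matches the patch.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_gfx {
		int num_xcc_per_xcp;	/* XCCs per partition; 0 treated as 1 */
		int num_compute_rings;	/* KCQ rings per XCC */
	};

	/* Master XCC: the first XCC of each partition, i.e. xcc_id is a
	 * multiple of num_xcc_per_xcp. Guard against a zero divisor, as
	 * amdgpu_gfx_is_master_xcc() does.
	 */
	static bool is_master_xcc(const struct fake_gfx *gfx, int xcc_id)
	{
		int per_xcp = gfx->num_xcc_per_xcp ? gfx->num_xcc_per_xcp : 1;

		return (xcc_id % per_xcp) == 0;
	}

	int main(void)
	{
		struct fake_gfx gfx = { .num_xcc_per_xcp = 2,
					.num_compute_rings = 4 };
		int xcc_id, i;

		for (xcc_id = 0; xcc_id < 4; xcc_id++) {
			printf("xcc %d: %s\n", xcc_id,
			       is_master_xcc(&gfx, xcc_id) ?
			       "master (map KCQs)" : "slave (skip KCQs)");
			for (i = 0; i < gfx.num_compute_rings; i++) {
				/* Flat index across XCCs, same arithmetic as
				 * the j computed in the loops above. */
				int j = i + xcc_id * gfx.num_compute_rings;

				printf("  ring %d -> global index %d\n", i, j);
			}
		}
		return 0;
	}

With two XCCs per partition, xcc_id 0 and 2 are treated as masters and get their KCQs
mapped and tested, while xcc_id 1 and 3 are skipped, which is the behaviour the
amdgpu_gfx_enable_kcq()/disable_kcq() and gfx_v9_4_3_cp_resume() hunks implement.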


