[PATCH v2 2/3] drm/amdgpu: sdma support for sriov cpx mode

Samir Dhume <samir.dhume@amd.com>
Fri Mar 15 19:50:58 UTC 2024


SDMA has 2 instances in SRIOV CPX mode. Even-numbered VFs use the
sdma0/sdma1 instances, while odd-numbered VFs use sdma2/sdma3
(summarized in the sketch below). The changes involve:
1. identifying whether the VF is odd- or even-numbered
2. registering the correct number of instances with the IRQ handler
3. mapping the instance number to the IH client-id depending on
   whether the VF is odd- or even-numbered
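
For reference, the seq-num to IH client-id mapping added by this patch
can be summarized by the following standalone sketch (illustrative
only; cpx_seq_to_client() is not part of the patch, and the enum
values stand in for SOC15_IH_CLIENTID_SDMA0..3):

	/* Illustrative sketch of the per-VF mapping; not driver code. */
	enum ih_client { SDMA0, SDMA1, SDMA2, SDMA3 };

	static enum ih_client cpx_seq_to_client(int seq_num, bool odd_vf)
	{
		/* Even VFs own SDMA0/SDMA1, odd VFs own SDMA2/SDMA3. */
		if (odd_vf)
			return seq_num ? SDMA3 : SDMA2;
		return seq_num ? SDMA1 : SDMA0;
	}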

v2:
1. Fix the number of instances registered with the IRQ handler.
2. Remove MMIO access from the interrupt handler; use xcc_mask to
   detect CPX mode (see the sketch below).
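
The xcc_mask-based detection mentioned in point 2 boils down to the
sketch below; it uses only fields already touched by the patch, and
the helper name is hypothetical:

	/* Sketch: detect an odd-numbered CPX VF without MMIO access.
	 * A CPX-mode VF sees exactly one XCC, so its logical mask is
	 * 0x1; the parity of the physical XCC id then selects the
	 * SDMA pair (sdma0/1 vs sdma2/3).
	 */
	static bool sdma_cpx_vf_is_odd(struct amdgpu_device *adev)
	{
		if (!amdgpu_sriov_vf(adev) || adev->gfx.xcc_mask != 0x1)
			return false;

		return adev->gfx.funcs->get_xcc_id(adev, 0) & 0x1;
	}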

Signed-off-by: Samir Dhume <samir.dhume@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 63 ++++++++++++++++--------
 1 file changed, 43 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index eaa4f5f49949..117a7c692c0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -66,13 +66,28 @@ static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
 	return (adev->reg_offset[SDMA0_HWIP][dev_inst][0] + offset);
 }
 
-static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
+static unsigned sdma_v4_4_2_seq_to_irq_id(struct amdgpu_device *adev, int seq_num)
 {
+	bool sriov_cpx_odd = false;
+
+	/* check for sriov cpx mode odd/even vf */
+	if (amdgpu_sriov_vf(adev)) {
+		if (adev->gfx.xcc_mask == 0x1)
+			if (adev->gfx.funcs->get_xcc_id(adev, 0) & 0x1)
+				sriov_cpx_odd = true;
+	}
+
 	switch (seq_num) {
 	case 0:
-		return SOC15_IH_CLIENTID_SDMA0;
+		if (sriov_cpx_odd)
+			return SOC15_IH_CLIENTID_SDMA2;
+		else
+			return SOC15_IH_CLIENTID_SDMA0;
 	case 1:
-		return SOC15_IH_CLIENTID_SDMA1;
+		if (sriov_cpx_odd)
+			return SOC15_IH_CLIENTID_SDMA3;
+		else
+			return SOC15_IH_CLIENTID_SDMA1;
 	case 2:
 		return SOC15_IH_CLIENTID_SDMA2;
 	case 3:
@@ -82,7 +97,7 @@ static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
 	}
 }
 
-static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
+static int sdma_v4_4_2_irq_id_to_seq(struct amdgpu_device *adev, unsigned client_id)
 {
 	switch (client_id) {
 	case SOC15_IH_CLIENTID_SDMA0:
@@ -90,9 +105,15 @@ static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
 	case SOC15_IH_CLIENTID_SDMA1:
 		return 1;
 	case SOC15_IH_CLIENTID_SDMA2:
-		return 2;
+		if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1))
+			return 0;
+		else
+			return 2;
 	case SOC15_IH_CLIENTID_SDMA3:
-		return 3;
+		if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1))
+			return 1;
+		else
+			return 3;
 	default:
 		return -EINVAL;
 	}
@@ -1300,13 +1321,15 @@ static int sdma_v4_4_2_late_init(void *handle)
 static int sdma_v4_4_2_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
-	int r, i;
+	int r, i, num_irq_inst;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 aid_id;
 
+	num_irq_inst = min(adev->sdma.num_instances, adev->sdma.num_inst_per_aid);
+
 	/* SDMA trap event */
-	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
-		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+	for (i = 0; i < num_irq_inst; i++) {
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(adev, i),
 				      SDMA0_4_0__SRCID__SDMA_TRAP,
 				      &adev->sdma.trap_irq);
 		if (r)
@@ -1314,8 +1337,8 @@ static int sdma_v4_4_2_sw_init(void *handle)
 	}
 
 	/* SDMA SRAM ECC event */
-	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
-		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+	for (i = 0; i < num_irq_inst; i++) {
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(adev, i),
 				      SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
 				      &adev->sdma.ecc_irq);
 		if (r)
@@ -1323,26 +1346,26 @@ static int sdma_v4_4_2_sw_init(void *handle)
 	}
 
 	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/
-	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
-		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+	for (i = 0; i < num_irq_inst; i++) {
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(adev, i),
 				      SDMA0_4_0__SRCID__SDMA_VM_HOLE,
 				      &adev->sdma.vm_hole_irq);
 		if (r)
 			return r;
 
-		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(adev, i),
 				      SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
 				      &adev->sdma.doorbell_invalid_irq);
 		if (r)
 			return r;
 
-		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(adev, i),
 				      SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
 				      &adev->sdma.pool_timeout_irq);
 		if (r)
 			return r;
 
-		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(adev, i),
 				      SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
 				      &adev->sdma.srbm_write_irq);
 		if (r)
@@ -1541,7 +1564,7 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
 	uint32_t instance, i;
 
 	DRM_DEBUG("IH: SDMA trap\n");
-	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
 
 	/* Client id gives the SDMA instance in AID. To know the exact SDMA
 	 * instance, interrupt entry gives the node id which corresponds to the AID instance.
@@ -1584,7 +1607,7 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
 		goto out;
 
-	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
 	if (instance < 0)
 		goto out;
 
@@ -1603,7 +1626,7 @@ static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
 
 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
 
-	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
 	if (instance < 0)
 		return 0;
 
@@ -1647,7 +1670,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
 	struct amdgpu_task_info *task_info;
 	u64 addr;
 
-	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
 	if (instance < 0 || instance >= adev->sdma.num_instances) {
 		dev_err(adev->dev, "sdma instance invalid %d\n", instance);
 		return -EINVAL;
-- 
2.34.1


