[PATCH v2 13/15] drm/amdgpu: introduce pcie port read/write entry

Huang Rui <ray.huang@amd.com>
Wed Aug 31 05:23:25 UTC 2016


This patch adds PCIe port read/write entries (pciep_rreg/pciep_wreg and the
RREG32_PCIE_PORT()/WREG32_PCIE_PORT() macros), because they will also be
used by the SI DPM code. si.c is converted to use the new macros instead of
calling si_pciep_rreg()/si_pciep_wreg() directly.
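
For illustration only (not part of this patch), a caller such as the SI DPM
code would be expected to use the new accessors roughly as sketched below.
The function name is made up; the register and field names are taken from
si.c, and the macros assume a local "adev" pointer in scope:

	/* minimal sketch: read-modify-write of an indirect PCIe port
	 * register through the new RREG32_PCIE_PORT/WREG32_PCIE_PORT
	 * accessors
	 */
	static void example_initiate_speed_change(struct amdgpu_device *adev)
	{
		u32 tmp;

		tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		tmp |= LC_INITIATE_LINK_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
	}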

Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |   4 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   2 +
 drivers/gpu/drm/amd/amdgpu/si.c            | 106 +++++++++++++++--------------
 3 files changed, 60 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 19c9a2e..6eb1d3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2043,6 +2043,8 @@ struct amdgpu_device {
 	spinlock_t pcie_idx_lock;
 	amdgpu_rreg_t			pcie_rreg;
 	amdgpu_wreg_t			pcie_wreg;
+	amdgpu_rreg_t			pciep_rreg;
+	amdgpu_wreg_t			pciep_wreg;
 	/* protects concurrent UVD register access */
 	spinlock_t uvd_ctx_idx_lock;
 	amdgpu_rreg_t			uvd_ctx_rreg;
@@ -2183,6 +2185,8 @@ bool amdgpu_device_has_dal_support(struct amdgpu_device *adev);
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8eb5396..a20a2b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1544,6 +1544,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->smc_wreg = &amdgpu_invalid_wreg;
 	adev->pcie_rreg = &amdgpu_invalid_rreg;
 	adev->pcie_wreg = &amdgpu_invalid_wreg;
+	adev->pciep_rreg = &amdgpu_invalid_rreg;
+	adev->pciep_wreg = &amdgpu_invalid_wreg;
 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
 	adev->didt_rreg = &amdgpu_invalid_rreg;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index d5fc197..fee76b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -905,6 +905,31 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
+u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	r = RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	WREG32(PCIE_PORT_DATA, (v));
+	(void)RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
 static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags;
@@ -1125,6 +1150,8 @@ static int si_common_early_init(void *handle)
 	adev->smc_wreg = &si_smc_wreg;
 	adev->pcie_rreg = &si_pcie_rreg;
 	adev->pcie_wreg = &si_pcie_wreg;
+	adev->pciep_rreg = &si_pciep_rreg;
+	adev->pciep_wreg = &si_pciep_wreg;
 	adev->uvd_ctx_rreg = NULL;
 	adev->uvd_ctx_wreg = NULL;
 	adev->didt_rreg = NULL;
@@ -1316,31 +1343,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 	}
 }
 
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
-	(void)RREG32(PCIE_PORT_INDEX);
-	r = RREG32(PCIE_PORT_DATA);
-	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-	return r;
-}
-
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
-	(void)RREG32(PCIE_PORT_INDEX);
-	WREG32(PCIE_PORT_DATA, (v));
-	(void)RREG32(PCIE_PORT_DATA);
-	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-}
-
 static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
@@ -1365,7 +1367,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
 		return;
 
-	speed_cntl = si_pciep_rreg(adev,PCIE_LC_SPEED_CNTL);
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
 		LC_CURRENT_DATA_RATE_SHIFT;
 	if (mask & DRM_PCIE_SPEED_80) {
@@ -1410,12 +1412,12 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
 
 			if (current_lw < max_lw) {
-				tmp = si_pciep_rreg(adev, PCIE_LC_LINK_WIDTH_CNTL);
+				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
 				if (tmp & LC_RENEGOTIATION_SUPPORT) {
 					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
 					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
 					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
-					si_pciep_wreg(adev, PCIE_LC_LINK_WIDTH_CNTL, tmp);
+					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
 				}
 			}
 
@@ -1430,13 +1432,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
 				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
 
-				tmp = si_pciep_rreg(adev, PCIE_LC_CNTL4);
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
 				tmp |= LC_SET_QUIESCE;
-				si_pciep_wreg(adev,PCIE_LC_CNTL4, tmp);
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
 
-				tmp = si_pciep_rreg(adev, PCIE_LC_CNTL4);
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
 				tmp |= LC_REDO_EQ;
-				si_pciep_wreg(adev, PCIE_LC_CNTL4, tmp);
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
 
 				mdelay(100);
 
@@ -1460,16 +1462,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
 				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
-				tmp = si_pciep_rreg(adev, PCIE_LC_CNTL4);
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
 				tmp &= ~LC_SET_QUIESCE;
-				si_pciep_wreg(adev, PCIE_LC_CNTL4, tmp);
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
 			}
 		}
 	}
 
 	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
 	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
-	si_pciep_wreg(adev, PCIE_LC_SPEED_CNTL, speed_cntl);
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
 	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
 	tmp16 &= ~0xf;
@@ -1481,12 +1483,12 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 		tmp16 |= 1;
 	pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
-	speed_cntl = si_pciep_rreg(adev, PCIE_LC_SPEED_CNTL);
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
-	si_pciep_wreg(adev, PCIE_LC_SPEED_CNTL, speed_cntl);
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		speed_cntl = si_pciep_rreg(adev, PCIE_LC_SPEED_CNTL);
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
 			break;
 		udelay(1);
@@ -1547,23 +1549,23 @@ static void si_program_aspm(struct amdgpu_device *adev)
 
 	if (adev->flags & AMD_IS_APU)
 		return;
-	orig = data = si_pciep_rreg(adev, PCIE_LC_N_FTS_CNTL);
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
 	data &= ~LC_XMIT_N_FTS_MASK;
 	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
 	if (orig != data)
-		si_pciep_wreg(adev, PCIE_LC_N_FTS_CNTL, data);
+		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
 
-	orig = data = si_pciep_rreg(adev, PCIE_LC_CNTL3);
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
 	data |= LC_GO_TO_RECOVERY;
 	if (orig != data)
-		si_pciep_wreg(adev, PCIE_LC_CNTL3, data);
+		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
 
 	orig = data = RREG32_PCIE(PCIE_P_CNTL);
 	data |= P_IGNORE_EDB_ERR;
 	if (orig != data)
 		WREG32_PCIE(PCIE_P_CNTL, data);
 
-	orig = data = si_pciep_rreg(adev, PCIE_LC_CNTL);
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
 	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
 	data |= LC_PMI_TO_L1_DIS;
 	if (!disable_l0s)
@@ -1573,7 +1575,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
 		data |= LC_L1_INACTIVITY(7);
 		data &= ~LC_PMI_TO_L1_DIS;
 		if (orig != data)
-			si_pciep_wreg(adev, PCIE_LC_CNTL, data);
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
 
 		if (!disable_plloff_in_l1) {
 			bool clk_req_support;
@@ -1643,11 +1645,11 @@ static void si_program_aspm(struct amdgpu_device *adev)
 				if (orig != data)
 					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
 			}
-			orig = data = si_pciep_rreg(adev, PCIE_LC_LINK_WIDTH_CNTL);
+			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
 			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
 			data |= LC_DYN_LANES_PWR_STATE(3);
 			if (orig != data)
-				si_pciep_wreg(adev, PCIE_LC_LINK_WIDTH_CNTL, data);
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
 
 			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
 			data &= ~LS2_EXIT_TIME_MASK;
@@ -1677,10 +1679,10 @@ static void si_program_aspm(struct amdgpu_device *adev)
 			}
 
 			if (clk_req_support) {
-				orig = data = si_pciep_rreg(adev, PCIE_LC_CNTL2);
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
 				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
 				if (orig != data)
-					si_pciep_wreg(adev, PCIE_LC_CNTL2, data);
+					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
 
 				orig = data = RREG32(THM_CLK_CNTL);
 				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
@@ -1718,7 +1720,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
 		}
 	} else {
 		if (orig != data)
-			si_pciep_wreg(adev, PCIE_LC_CNTL, data);
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
 	}
 
 	orig = data = RREG32_PCIE(PCIE_CNTL2);
@@ -1727,14 +1729,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
 		WREG32_PCIE(PCIE_CNTL2, data);
 
 	if (!disable_l0s) {
-		data = si_pciep_rreg(adev, PCIE_LC_N_FTS_CNTL);
+		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
 		if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
 			data = RREG32_PCIE(PCIE_LC_STATUS1);
 			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
-				orig = data = si_pciep_rreg(adev, PCIE_LC_CNTL);
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
 				data &= ~LC_L0S_INACTIVITY_MASK;
 				if (orig != data)
-					si_pciep_wreg(adev, PCIE_LC_CNTL, data);
+					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
 			}
 		}
 	}
-- 
2.7.4


